decimal.Decimal

Here are examples of the Python API decimal.Decimal, taken from open-source projects.

149 Examples
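
A note on construction before the examples: building a Decimal from a float captures the float's binary approximation, while building it from a string (or int) preserves the intended value exactly. A minimal standalone illustration (not taken from any of the projects below):

from decimal import Decimal

print(Decimal('0.1'))    # 0.1 -- exact
print(Decimal(0.1))      # 0.1000000000000000055511151231257827021181583404541015625
print(Decimal('0.10') + Decimal('0.20'))  # 0.30 -- trailing zeros are preserved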

Example 101

Project: element43 Source File: functions.py
def calculate_manufacturing_job(form_data):
    """
    Calculates the manufacturing costs and profits.
    """

    #
    # This method is basically divided into two sections:
    #
    # 1. Calculate bill of materials
    # 2. Calculate production time
    #

    result = {}  # result dictionary which will be returned
    blueprint_type_id = int(form_data['blueprint_type_id'])
    blueprint_runs = int(form_data['blueprint_runs'])
    blueprint = InvBlueprintType.objects.select_related().get(blueprint_type__id=blueprint_type_id)
    result['produced_units'] = blueprint.product_type.portion_size * blueprint_runs

    # --------------------------------------------------------------------------
    # Calculate bill of materials
    # --------------------------------------------------------------------------
    materials = get_materials(form_data, blueprint)
    materials = calculate_material_prices(materials)
    materials_cost_total = math.fsum([material['price_total'] for material in materials])
    materials_volume_total = math.fsum([material['volume'] for material in materials])

    # sort materials by name:
    materials.sort(key=lambda material: material['name'])

    result['materials'] = materials
    result['materials_cost_unit'] = materials_cost_total / result['produced_units']
    result['materials_cost_total'] = materials_cost_total
    result['materials_volume_total'] = materials_volume_total

    # --------------------------------------------------------------------------
    # Calculate production time
    # --------------------------------------------------------------------------
    production_time = calculate_production_time(form_data, blueprint)

    result['production_time_run'] = round(production_time)
    result['production_time_total'] = round(production_time * blueprint_runs)

    # add all the other values to the result dictionary
    result['blueprint_cost_unit'] = form_data['blueprint_price'] / result['produced_units']
    result['blueprint_cost_total'] = form_data['blueprint_price']
    result['revenue_unit'] = form_data['target_sell_price']
    result['revenue_total'] = form_data['target_sell_price'] * result['produced_units']
    result['blueprint_type_id'] = blueprint_type_id
    result['blueprint_name'] = blueprint.blueprint_type.name
    result['blueprint_runs'] = blueprint_runs

    brokers_fee = form_data.get('brokers_fee', 0)
    sales_tax = form_data.get('sales_tax', 0)

    if not brokers_fee:
        brokers_fee = 0

    if not sales_tax:
        sales_tax = 0

    result['brokers_fee_unit'] = result['revenue_unit'] * (brokers_fee / 100)
    result['brokers_fee_total'] = result['brokers_fee_unit'] * result['produced_units']
    result['sales_tax_unit'] = result['revenue_unit'] * (sales_tax / 100)
    result['sales_tax_total'] = result['sales_tax_unit'] * result['produced_units']

    result['total_cost_unit'] = result['brokers_fee_unit'] + result['sales_tax_unit'] + result['blueprint_cost_unit'] + Decimal(materials_cost_total / result['produced_units'])
    result['total_cost_total'] = result['total_cost_unit'] * result['produced_units']

    result['profit_unit'] = form_data['target_sell_price'] - result['total_cost_unit']
    result['profit_total'] = result['profit_unit'] * result['produced_units']
    result['profit_total_hour'] = result['profit_total'] / Decimal(result['production_time_total'] / 3600)
    result['profit_total_day'] = result['profit_total_hour'] * 24

    if result['profit_total'] != 0 and result['total_cost_total'] != 0:
        result['profit_total_percent'] = (result['profit_total'] / result['total_cost_total']) * 100
    else:
        result['profit_total_percent'] = 0

    return result
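
A detail worth noticing above: the float materials cost is wrapped in Decimal() before being added to the Decimal-valued fee and blueprint fields, because Python refuses to mix float and Decimal in arithmetic. A minimal sketch of that constraint (the values are illustrative, not from element43):

from decimal import Decimal

fee = Decimal('12.50')        # e.g. a fee already held as Decimal
materials_cost = 100.0 / 3.0  # a plain float, as math.fsum() would produce

# fee + materials_cost        # would raise TypeError
total = fee + Decimal(materials_cost)  # works, but carries the float's rounding error
print(total)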

Example 102

Project: rabbit4mt4 Source File: json_rpc_db_amqp_client.py
@click.command()
@click.option('--dbengine', help="DB engine.", default="mysql")
@click.option('--dbhost', help="DB host.", default="127.0.0.1")
@click.option('--db', help="DB name.", default="test")
@click.option('--dbuser', help="DB user.", default="root")
@click.option('--dbpassword', help="DB password.", default="123456")
@click.option('--dbtablesprefix', help="Tables prefix. ('prefix_')", default="")

@click.option('--terminal_id', help="Terminal ID.", default="mt4_demo01_123456")

@click.option('--create/--no-create', default=False, help="Create table.")
@click.option('--truncate/--no-truncate', default=False, help="Truncate table.")
@click.option('--drop/--no-drop', default=False, help="Drop table.")
@click.option('--insert/--no-insert', default=True, help='Insert data.')

@click.option('--method', help="method.", default="Echo")
@click.option('--message', help="message.", default="Hello Eiffel")
@click.option('--n', help="N", default=10)
@click.option('--n1', help="N1", default=6)
@click.option('--n2', help="N2", default=3)
@click.option('--ticket', help="ticket", default=-1) # ticket = 237696636 > 0 (SELECT_BY_TICKET) ; -2 (SELECT_BY_POS)
#@click.option('--pos', help="ticket", default=None) # index+1 SELECT_BY_POS
@click.option('--price', help="price", default=-1.0)
@click.option('--stoploss', help="stoploss", default=-1.0)
@click.option('--takeprofit', help="takeprofit", default=-1.0)
@click.option('--slippage', help="slippage", default=-1)
@click.option('--volume', help="volume", default=-1.0)
@click.option('--symbol', help="symbol", default="EURUSD")
@click.option('--timeframe', help="timeframe", default="H1")
@click.option('--cmd', help="cmd", default="BUY")
@click.option('--comment', help="comment", default="sent from script")
@click.option('--magic', help="magic", default=123)
@click.option('--expiration', help="expiration", default="1970-01-01T00:00:00+00:00")

@click.option('--mqhost', help="MQ host", default="localhost")
@click.option('--mquser', help="MQ user", default="guest")
@click.option('--mqpassword', help="MQ password", default="guest")

@click.option('--timeout', help="timeout.", default=5)

def main(dbengine, dbhost, db, dbuser, dbpassword, dbtablesprefix,
            mqhost, mquser, mqpassword, timeout,
            terminal_id, method, message, n, n1, n2, ticket, price, stoploss, takeprofit, slippage, volume, symbol, timeframe, cmd, comment, magic, expiration, 
            create, truncate, drop, insert):
    table_name = "json_rpc"
    
    config_db = Config_DB_Default()
    config_db.tablesprefix = dbtablesprefix
    config_db.table_name = table_name
    config_db.dbengine = dbengine
    config_db.host = dbhost
    config_db.database = db
    config_db.user = dbuser
    config_db.password = dbpassword

    config_amqp = Config_AMQP_Default()
    config_amqp.host = mqhost
    config_amqp.username = mquser
    config_amqp.password = mqpassword
    config_amqp.timeout = timeout
    
    b = MT4_MySQL_RabbitMQ_Bridge(terminal_id, config_db, config_amqp).use_terminal(terminal_id)
    
    if stoploss<0:
        stoploss = 0
    
    if takeprofit<0:
        takeprofit = 0

    if volume<0:
        volume=Decimal("0.01")            

    #expiration = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) + datetime.timedelta(hours=8)
        
    pp = pprint.PrettyPrinter(indent=4)
    
    if drop:
        b.tables_drop()
        return
    
    if truncate:
        b.tables_truncate()
        return
    
    if create:
        b.tables_create()
    
    if insert:
        #b._call("comment", "%s @ %s" % (message, datetime.datetime.utcnow()))
        #b._call("add", "2", "3")
        #b._call("xx", "%s @ %s" % (message, datetime.datetime.utcnow()))
        #b.Comment("Hello world!")
        #b.IDN()
        #result = b.Echo(message)
        method = method.lower()
        if method=='idn':
            result = b.IDN()
            logging.info("result: %s" % result)
            logging.info("type(result): %s" % type(result))
        elif method=='print':
            result = b.Print(message)
            logging.info("result: %s" % result)
            logging.info("type(result): %s" % type(result))
        elif method=='comment':
            result = b.Comment(message)
            logging.info("result: %s" % result)
            logging.info("type(result): %s" % type(result))
        elif method=='echo':
            result = b.Echo(message)
            logging.info("result: %s" % result)
            logging.info("type(result): %s" % type(result))
        elif method=='add':
            result = b.Add(n1,n2)
            logging.info("result: %s" % result)
            logging.info("type(result): %s" % type(result))
        elif method in ['accountinformation', 'accountinfo']:
            result = b.AccountInformation()
            logging.info("timestamp: %r" % result[0])
            logging.info("result: %s" % result[1])
            #logging.info("result: %s" % pp.pformat(result[1]))
            logging.info("type(result): %s" % type(result[1]))
        elif method in ['marketinfo', 'symbolinfo']:
            result = b.MarketInfo(symbol)
            logging.info("timestamp: %r" % result[0])
            logging.info("result: %s" % result[1])
            #logging.info("result: %s" % pp.pformat(result[1]))
            logging.info("type(result): %s" % type(result[1]))
        elif method in ['accounthistory', 'history']:
            result = b.AccountHistory(n)
            logging.info("timestamp: %r" % result[0])
            logging.info("result: \n%s" % result[1])
            logging.info("type(result): %s" % type(result[1]))
            logging.info("result.dtypes: %r" % result[1].dtypes)
            logging.info("profit: %r" % result[1]["profit"].sum())
        elif method in ['accounttrades', 'trades']:
            result = b.AccountTrades(n)
            logging.info("timestamp: %r" % result[0])
            logging.info("result: \n%s" % result[1])
            logging.info("type(result): %s" % type(result[1]))
            logging.info("result.dtypes: %r" % result[1].dtypes)
            logging.info("profit: %r" % result[1]["profit"].sum())
        elif method=='quotes':
            result = b.Quotes(symbol,timeframe,n)
            logging.info("timestamp: %r" % result[0])
            logging.info("result: %s" % result[1])
            logging.info("type(result): %s" % type(result[1]))
            logging.info("result.dtypes: %r" % result[1].dtypes)
        elif method=='ordersend':
            (timestamp, data) = b.OrderSend(symbol, cmd, volume, price, slippage, stoploss, takeprofit, comment, magic, expiration)
            logging.info("result: %s" % data)
            logging.info("type(result): %s" % type(data))
        elif method=='orderclose':
            (timestamp, data) = b.OrderClose(ticket, volume, price, slippage)
            logging.info("result: %s" % data)
            logging.info("type(result): %s" % type(data))
        elif method=='orderdelete':
            (timestamp, data) = b.OrderDelete(ticket)
            logging.info("result: %s" % data)
            logging.info("type(result): %s" % type(data))
        elif method=='ordermodify':
            (timestamp, data) = b.OrderModify(ticket, price, stoploss, takeprofit, expiration)
            logging.info("result: %s" % data)
            logging.info("type(result): %s" % type(data))
        
        # ToDo: MarketInfo http://docs.mql4.com/constants/environment_state/marketinfoconstants#enum_symbol_info_string
        else:
            logging.warning("Undefined method %r" % method)
            raise NotImplementedError(method)
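
The fallback volume above is built with Decimal("0.01") from a string literal, so the minimum lot size is exact. A small sketch of the same guard, extended to normalize user-supplied floats as well (the helper name is hypothetical):

from decimal import Decimal

def normalize_volume(volume):
    if volume < 0:
        return Decimal("0.01")   # exact minimum lot size
    return Decimal(str(volume))  # round-trip through str to avoid binary float artifacts

print(normalize_volume(-1.0))  # 0.01
print(normalize_volume(0.1))   # 0.1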

Example 103

Project: aemanager Source File: views.py
@csrf_exempt
@commit_on_success
def paypal_ipn(request):
    # send back the response to paypal
    data = dict(request.POST.items())
    args = {'cmd': '_notify-validate'}
    args.update(data)
    params = urllib.urlencode(dict([k, v.encode('utf-8')] for k, v in args.items()))
    paypal_response = urllib2.urlopen(settings.PAYPAL_URL + '/cgi-bin/webscr', params).read()

    # process the payment
    receiver_id = data['receiver_id']
    transaction_id = data['txn_id']
    payment_status = data['payment_status']
    payment_amount = data['mc_gross']
    payment_currency = data['mc_currency']
    fee = data['mc_fee']
    item_name = data['item_name']
    user_id = data['custom']
    user = get_object_or_404(User, pk=user_id)
    profile = user.get_profile()
    last_subscription = profile.get_last_subscription()

    subscription, created = Subscription.objects.get_or_create(transaction_id=transaction_id,
                                                               defaults={'owner': user,
                                                                         'state': SUBSCRIPTION_STATE_NOT_PAID,
                                                                         'expiration_date': profile.get_next_expiration_date(),
                                                                         'transaction_id': transaction_id,
                                                                         'error_message': ugettext('Not verified')})

    if paypal_response == 'VERIFIED':
        if receiver_id != settings.PAYPAL_RECEIVER_ID:
            subscription.error_message = ugettext('Receiver is not as defined in settings. Spoofing ?')
        elif payment_status != 'Completed':
            subscription.error_message = ugettext('Payment not completed')
        elif payment_amount != settings.PAYPAL_APP_SUBSCRIPTION_AMOUNT:
            subscription.error_message = ugettext('Amount altered. Bad guy ?')
        elif payment_currency != settings.PAYPAL_APP_SUBSCRIPTION_CURRENCY:
            subscription.error_message = ugettext('Currency altered. Bad guy ?')
        else:
            subscription.error_message = ugettext('Paid')
            subscription.state = SUBSCRIPTION_STATE_PAID

            # create an invoice for this payment
            # first, get the provider user
            provider = User.objects.get(email=settings.SERVICE_PROVIDER_EMAIL)
            if provider.get_profile().vat_number:
                payment_amount = Decimal(payment_amount) / Decimal('1.196')

            # look for a customer corresponding to user
            address, created = Address.objects.get_or_create(contact__email=user.email,
                                                             owner=provider,
                                                             defaults={'street': profile.address.street,
                                                                       'zipcode': profile.address.zipcode,
                                                                       'city': profile.address.city,
                                                                       'country': profile.address.country,
                                                                       'owner': provider})
            customer, created = Contact.objects.get_or_create(email=user.email,
                                                              defaults={'contact_type': CONTACT_TYPE_COMPANY,
                                                                        'name': '%s %s' % (user.first_name, user.last_name),
                                                                        'company_id': profile.company_id,
                                                                        'legal_form': 'Auto-entrepreneur',
                                                                        'email': user.email,
                                                                        'address': address,
                                                                        'owner': provider})
            # create a related project if needed
            # set it to finished to clear daily business
            project, created = Project.objects.get_or_create(state=PROJECT_STATE_FINISHED,
                                                             customer=customer,
                                                             name='Subscription %s - %s %s' % (Site.objects.get_current().name, user.first_name, user.last_name),
                                                             defaults={'state': PROJECT_STATE_FINISHED,
                                                                       'customer': customer,
                                                                       'name': 'Subscription %s - %s %s' % (Site.objects.get_current().name, user.first_name, user.last_name),
                                                                       'owner': provider})

            # create proposal for this subscription
            begin_date = datetime.date.today()
            if begin_date < last_subscription.expiration_date:
                begin_date = last_subscription.expiration_date

            proposal = Proposal.objects.create(project=project,
                                               reference='subscription%i%i%i' % (subscription.expiration_date.year,
                                                                                  subscription.expiration_date.month,
                                                                                  subscription.expiration_date.day),
                                               state=PROPOSAL_STATE_BALANCED,
                                               begin_date=begin_date,
                                               end_date=subscription.expiration_date,
                                               contract_content='',
                                               update_date=datetime.date.today(),
                                               expiration_date=None,
                                               owner=provider)

            unit_price = Decimal(settings.PAYPAL_APP_SUBSCRIPTION_AMOUNT)
            if provider.get_profile().vat_number:
                unit_price = Decimal(unit_price) / Decimal('1.196')

            proposal_row = ProposalRow.objects.create(proposal=proposal,
                                                      label=item_name,
                                                      category=ROW_CATEGORY_SERVICE,
                                                      quantity=1,
                                                      unit_price='%s' % unit_price,
                                                      owner=provider)

            # finally create invoice
            invoice = Invoice.objects.create(customer=customer,
                                             invoice_id=Invoice.objects.get_next_invoice_id(provider),
                                             state=INVOICE_STATE_PAID,
                                             amount=payment_amount,
                                             edition_date=datetime.date.today(),
                                             payment_date=datetime.date.today(),
                                             paid_date=datetime.date.today(),
                                             payment_type=PAYMENT_TYPE_BANK_CARD,
                                             execution_begin_date=begin_date,
                                             execution_end_date=subscription.expiration_date,
                                             penalty_date=None,
                                             penalty_rate=None,
                                             discount_conditions=None,
                                             owner=provider)

            invoice_row = InvoiceRow.objects.create(proposal=proposal,
                                                    invoice=invoice,
                                                    label=item_name,
                                                    category=ROW_CATEGORY_SERVICE,
                                                    quantity=1,
                                                    unit_price=payment_amount,
                                                    balance_payments=True,
                                                    vat_rate=VAT_RATES_19_6,
                                                    owner=provider)
            # create expense for paypal fee
            expense = Expense.objects.create(date=datetime.date.today(),
                                             reference=transaction_id,
                                             supplier='Paypal',
                                             amount=fee,
                                             payment_type=PAYMENT_TYPE_BANK_CARD,
                                             description='Commission paypal',
                                             owner=provider)

            # generate invoice in pdf
            response = HttpResponse(mimetype='application/pdf')
            invoice.to_pdf(provider, response)

            subject_template = loader.get_template('core/subscription_paid_email_subject.html')
            subject_context = {'site_name': Site.objects.get_current().name}
            subject = subject_template.render(Context(subject_context))
            body_template = loader.get_template('core/subscription_paid_email.html')
            body_context = {'site_name': Site.objects.get_current().name,
                            'expiration_date': subscription.expiration_date}
            body = body_template.render(Context(body_context))
            email = EmailMessage(subject=subject,
                                 body=body,
                                 to=[user.email])
            email.attach('facture_%i.pdf' % (invoice.invoice_id), response.content, 'application/pdf')
            email.send(fail_silently=(not settings.DEBUG))

        subscription.save()

    return render_to_response('core/paypal_ipn.html',
                              {'active': 'account',
                               'title': _('Subscribe')},
                              context_instance=RequestContext(request))
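
The VAT handling above divides the gross amount by Decimal('1.196') to strip French VAT (19.6%, consistent with VAT_RATES_19_6 on the invoice row). A standalone sketch of that computation with an illustrative amount:

from decimal import Decimal, ROUND_HALF_UP

gross = Decimal('25.00')        # e.g. PayPal's mc_gross
net = gross / Decimal('1.196')  # amount excluding 19.6% VAT
print(net.quantize(Decimal('0.01'), rounding=ROUND_HALF_UP))  # 20.90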

Example 104

Project: django-dynamicresponse Source File: emitters.py
    def construct(self):
        """
        Recursively serializes many types, falling back to
        Django's `smart_unicode` for types it doesn't recognize.

        Returns `dict`.
        """

        def _any(thing, fields=()):
            """
            Dispatch, all types are routed through here.
            """

            ret = None

            if isinstance(thing, QuerySet):
                ret = _qs(thing, fields=fields)
            elif isinstance(thing, Page):
                ret = _list(thing.object_list, fields=fields)
            elif isinstance(thing, (tuple, list)):
                ret = _list(thing, fields=fields)
            elif isinstance(thing, dict):
                ret = _dict(thing, fields=fields)
            elif isinstance(thing, decimal.Decimal):
                ret = str(thing)
            elif isinstance(thing, Model):
                ret = _model(thing, fields=fields)
            elif inspect.isfunction(thing):
                if not inspect.getargspec(thing)[0]:
                    ret = _any(thing())
            elif hasattr(thing, '__emittable__'):
                f = thing.__emittable__
                if inspect.ismethod(f) and len(inspect.getargspec(f)[0]) == 1:
                    ret = _any(f())
            elif repr(thing).startswith("<django.db.models.fields.related.RelatedManager"):
                ret = _any(thing.all())
            else:
                ret = smart_unicode(thing, strings_only=True)

            return ret

        def _fk(data, field):
            """
            Foreign keys.
            """

            return _any(getattr(data, field.name))

        def _related(data, fields=()):
            """
            Reverse foreign key relations (related managers).
            """

            return [ _model(m, fields) for m in data.iterator() ]

        def _m2m(data, field, fields=()):
            """
            Many to many (re-route to `_model`.)
            """

            return [ _model(m, fields) for m in getattr(data, field.name).iterator() ]

        def _model(data, fields=()):
            """
            Models. Will respect the `fields` and/or
            `exclude` on the handler (see `typemapper`.)
            """

            ret = { }
            handler = None

            # Does the model implement get_serialization_fields() or serialize_fields()?
            # We should only serialize these fields.
            if hasattr(data, 'get_serialization_fields'):
                fields = set(data.get_serialization_fields())
            if hasattr(data, 'serialize_fields'):
                fields = set(data.serialize_fields())

            # Is the model a Django user instance?
            # Ensure that only core (non-sensitive fields) are serialized
            if isinstance(data, User):
                fields = getattr(settings, 'DYNAMICRESPONSE_DJANGO_USER_FIELDS', ('id', 'email', 'first_name', 'last_name'))

            # Should we explicitly serialize specific fields?
            if fields:

                v = lambda f: getattr(data, f.attname)

                get_fields = set(fields)
                met_fields = self.method_fields(handler, get_fields)

                # Serialize normal fields
                for f in data._meta.local_fields:
                    if f.serialize and not any([ p in met_fields for p in [ f.attname, f.name ]]):
                        if not f.rel:
                            if f.attname in get_fields:
                                ret[f.attname] = _any(v(f))
                                get_fields.remove(f.attname)
                        else:
                            if f.attname[:-3] in get_fields:
                                ret[f.name] = _fk(data, f)
                                get_fields.remove(f.name)

                # Serialize many-to-many fields
                for mf in data._meta.many_to_many:
                    if mf.serialize and mf.attname not in met_fields:
                        if mf.attname in get_fields:
                            ret[mf.name] = _m2m(data, mf)
                            get_fields.remove(mf.name)

                # Try to get the remainder of fields
                for maybe_field in get_fields:
                    if isinstance(maybe_field, (list, tuple)):
                        model, fields = maybe_field
                        inst = getattr(data, model, None)

                        if inst:
                            if hasattr(inst, 'all'):
                                ret[model] = _related(inst, fields)
                            elif callable(inst):
                                if len(inspect.getargspec(inst)[0]) == 1:
                                    ret[model] = _any(inst(), fields)
                            else:
                                ret[model] = _model(inst, fields)

                    elif maybe_field in met_fields:
                        # Overriding normal field which has a "resource method"
                        # so you can alter the contents of certain fields without
                        # using different names.
                        ret[maybe_field] = _any(met_fields[maybe_field](data))

                    else:
                        maybe = getattr(data, maybe_field, None)
                        if maybe:
                            if callable(maybe):
                                if len(inspect.getargspec(maybe)[0]) == 1:
                                    ret[maybe_field] = _any(maybe())
                            else:
                                ret[maybe_field] = _any(maybe)
                        else:
                            ret[maybe_field] = _any(maybe)

            else:

                for f in data._meta.fields:
                    if not f.attname.startswith('_'):
                        ret[f.attname] = _any(getattr(data, f.attname))

                fields = dir(data.__class__) + ret.keys()
                add_ons = [k for k in dir(data) if k not in fields]

                for k in add_ons:
                    if not k.__str__().startswith('_'):
                        ret[k] = _any(getattr(data, k))

            return ret

        def _qs(data, fields=()):
            """
            Querysets.
            """

            return [ _any(v, fields) for v in data ]

        def _list(data, fields=()):
            """
            Lists.
            """

            return [ _any(v, fields) for v in data ]

        def _dict(data, fields=()):
            """
            Dictionaries.
            """

            return dict([ (k, _any(v, fields)) for k, v in data.iteritems() ])

        # Kickstart the serializin'.
        return _any(self.data, self.fields)
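
The emitter's Decimal branch returns str(thing) rather than float(thing): the string survives JSON encoding losslessly, and Decimal itself is not JSON-serializable. A minimal comparison (standalone sketch):

import json
from decimal import Decimal

price = Decimal('19.99')
print(json.dumps({'price': str(price)}))    # {"price": "19.99"} -- exact
print(json.dumps({'price': float(price)}))  # {"price": 19.99} -- now a binary float
# json.dumps({'price': price})              # TypeError: Decimal is not JSON serializable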

Example 105

Project: oq-hazardlib Source File: point_test.py
    def test_7_many_ruptures(self):
        source_id = name = 'test7-source'
        trt = TRT.VOLCANIC
        mag1 = 4.5
        mag2 = 5.5
        mag1_rate = 9e-3
        mag2_rate = 9e-4
        hypocenter1 = 9.0
        hypocenter2 = 10.0
        hypocenter1_weight = Decimal('0.8')
        hypocenter2_weight = Decimal('0.2')
        nodalplane1 = NodalPlane(strike=45, dip=90, rake=0)
        nodalplane2 = NodalPlane(strike=0, dip=45, rake=10)
        nodalplane1_weight = Decimal('0.3')
        nodalplane2_weight = Decimal('0.7')
        upper_seismogenic_depth = 2
        lower_seismogenic_depth = 16
        rupture_aspect_ratio = 2
        rupture_mesh_spacing = 0.5
        location = Point(0, 0)
        magnitude_scaling_relationship = PeerMSR()
        tom = PoissonTOM(time_span=50)

        mfd = EvenlyDiscretizedMFD(min_mag=mag1, bin_width=(mag2 - mag1),
                                   occurrence_rates=[mag1_rate, mag2_rate])
        nodal_plane_distribution = PMF([(nodalplane1_weight, nodalplane1),
                                        (nodalplane2_weight, nodalplane2)])
        hypocenter_distribution = PMF([(hypocenter1_weight, hypocenter1),
                                       (hypocenter2_weight, hypocenter2)])
        point_source = PointSource(
            source_id, name, trt, mfd, rupture_mesh_spacing,
            magnitude_scaling_relationship, rupture_aspect_ratio, tom,
            upper_seismogenic_depth, lower_seismogenic_depth,
            location, nodal_plane_distribution, hypocenter_distribution
        )
        actual_ruptures = list(point_source.iter_ruptures())
        self.assertEqual(len(actual_ruptures),
                         point_source.count_ruptures())
        expected_ruptures = {
            (mag1, nodalplane1.rake, hypocenter1): (
                # probabilistic rupture's occurrence rate
                9e-3 * 0.3 * 0.8,
                # rupture surface corners
                planar_surface_test_data.TEST_7_RUPTURE_1_CORNERS
            ),
            (mag2, nodalplane1.rake, hypocenter1): (
                9e-4 * 0.3 * 0.8,
                planar_surface_test_data.TEST_7_RUPTURE_2_CORNERS
            ),
            (mag1, nodalplane2.rake, hypocenter1): (
                9e-3 * 0.7 * 0.8,
                planar_surface_test_data.TEST_7_RUPTURE_3_CORNERS
            ),
            (mag2, nodalplane2.rake, hypocenter1): (
                9e-4 * 0.7 * 0.8,
                planar_surface_test_data.TEST_7_RUPTURE_4_CORNERS
            ),
            (mag1, nodalplane1.rake, hypocenter2): (
                9e-3 * 0.3 * 0.2,
                planar_surface_test_data.TEST_7_RUPTURE_5_CORNERS
            ),
            (mag2, nodalplane1.rake, hypocenter2): (
                9e-4 * 0.3 * 0.2,
                planar_surface_test_data.TEST_7_RUPTURE_6_CORNERS
            ),
            (mag1, nodalplane2.rake, hypocenter2): (
                9e-3 * 0.7 * 0.2,
                planar_surface_test_data.TEST_7_RUPTURE_7_CORNERS
            ),
            (mag2, nodalplane2.rake, hypocenter2): (
                9e-4 * 0.7 * 0.2,
                planar_surface_test_data.TEST_7_RUPTURE_8_CORNERS
            )
        }
        for actual_rupture in actual_ruptures:
            expected_occurrence_rate, expected_corners = expected_ruptures[
                (actual_rupture.mag, actual_rupture.rake,
                 actual_rupture.hypocenter.depth)
            ]
            self.assertTrue(isinstance(actual_rupture,
                ParametricProbabilisticRupture))
            self.assertEqual(actual_rupture.occurrence_rate,
                             expected_occurrence_rate)
            self.assertIs(actual_rupture.temporal_occurrence_model, tom)
            self.assertEqual(actual_rupture.tectonic_region_type, trt)
            surface = actual_rupture.surface

            tl, tr, br, bl = expected_corners
            self.assertEqual(tl, surface.top_left)
            self.assertEqual(tr, surface.top_right)
            self.assertEqual(bl, surface.bottom_left)
            self.assertEqual(br, surface.bottom_right)
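
The nodal-plane and hypocentre weights are given as Decimal('0.3'), Decimal('0.7'), and so on, presumably because the PMF constructor checks that the weights sum to exactly 1 -- a check binary floats can fail. A standalone illustration of the difference:

from decimal import Decimal

print(sum([0.1] * 10) == 1.0)           # False -- the float sum is 0.9999999999999999
print(sum([Decimal('0.1')] * 10) == 1)  # True  -- the Decimal sum is exactly 1.0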

Example 106

Project: synnefo Source File: tests.py
    def test_register_image(self, backend):
        required = {
            "HTTP_X_IMAGE_META_NAME": u"TestImage\u2602",
            "HTTP_X_IMAGE_META_LOCATION": "pithos://4321-4321/%E2%98%82/foo"}
        # Check valid name
        headers = deepcopy(required)
        headers.pop("HTTP_X_IMAGE_META_NAME")
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("name" in response.content)
        headers["HTTP_X_IMAGE_META_NAME"] = ""
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("name" in response.content)
        # Check valid location
        headers = deepcopy(required)
        headers.pop("HTTP_X_IMAGE_META_LOCATION")
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("location" in response.content)
        headers["HTTP_X_IMAGE_META_LOCATION"] = ""
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("location" in response.content)
        headers["HTTP_X_IMAGE_META_LOCATION"] = "pitho://4321-4321/images/foo"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("location" in response.content)
        headers["HTTP_X_IMAGE_META_LOCATION"] = "pithos://4321-4321/foo"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("location" in response.content)
        # ID not supported
        headers = deepcopy(required)
        headers["HTTP_X_IMAGE_META_ID"] = "1234"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        # Unknown meta headers not supported
        headers = deepcopy(required)
        headers["HTTP_X_IMAGE_META_LOLO"] = "1234"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        headers = deepcopy(required)
        headers["HTTP_X_IMAGE_META_STORE"] = "pitho"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("store " in response.content)
        headers = deepcopy(required)
        headers["HTTP_X_IMAGE_META_DISK_FORMAT"] = "diskdumpp"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("disk format" in response.content)
        headers = deepcopy(required)
        headers["HTTP_X_IMAGE_META_CONTAINER_FORMAT"] = "baree"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("container format" in response.content)

        backend().get_object_meta.return_value = {"uuid": "1234-1234-1234",
                                                  "bytes": 42,
                                                  "is_snapshot": True,
                                                  "hash": "unique_mapfile",
                                                  "mapfile": "unique_mapfile"}
        headers = deepcopy(required)
        headers["HTTP_X_IMAGE_META_SIZE"] = "foo"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("size" in response.content)
        headers["HTTP_X_IMAGE_META_SIZE"] = "43"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        self.assertTrue("size" in response.content)

        # Unicode Error:
        headers["HTTP_X_IMAGE_META_NAME"] = "\xc2"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)
        headers["HTTP_X_IMAGE_META_NAME"] = u"TestImage\u2602"

        headers["HTTP_X_IMAGE_META_SIZE"] = 42
        headers["HTTP_X_IMAGE_META_CHECKSUM"] = "wrong_checksum"
        response = self.post(IMAGES_URL, **headers)
        self.assertBadRequest(response)

        backend().get_object_by_uuid.return_value = (
            {"uuid": "1234-1234-1234",
             "bytes": 42,
             "mapfile": "unique_mapfile",
             "is_snapshot": True,
             "hash": "unique_mapfile",
             "version": 42,
             'version_timestamp': Decimal('1392487853.863673'),
             "plankton:name": u"TestImage\u2602",
             "plankton:container_format": "bare",
             "plankton:disk_format": "diskdump",
             "plankton:status": u"AVAILABLE"},
            {"read": []},
            u"4321-4321/\u2602/foo",
        )
        headers = deepcopy(required)
        response = self.post(IMAGES_URL, **headers)
        self.assertSuccess(response)
        self.assertEqual(response["x-image-meta-location"],
                         "pithos://4321-4321/%E2%98%82/foo")
        self.assertEqual(response["x-image-meta-id"], "1234-1234-1234")
        self.assertEqual(response["x-image-meta-status"], "AVAILABLE")
        self.assertEqual(response["x-image-meta-deleted-at"], "")
        self.assertEqual(response["x-image-meta-is-public"], "False")
        self.assertEqual(response["x-image-meta-owner"], "4321-4321")
        self.assertEqual(response["x-image-meta-size"], "42")
        self.assertEqual(response["x-image-meta-checksum"], "unique_mapfile")
        self.assertEqual(urllib.unquote(response["x-image-meta-name"]),
                         u"TestImage\u2602".encode("utf-8"))
        self.assertEqual(response["x-image-meta-container-format"], "bare")
        self.assertEqual(response["x-image-meta-disk-format"], "diskdump")
        self.assertEqual(response["x-image-meta-created-at"],
                         "2014-02-15 18:10:53")
        self.assertEqual(response["x-image-meta-updated-at"],
                         "2014-02-15 18:10:53")

        # Extra headers,properties
        backend().get_object_by_uuid.return_value = (
            {"uuid": "1234-1234-1234",
             "bytes": 42,
             "is_snapshot": True,
             "hash": "unique_mapfile",
             "mapfile": "unique_mapfile",
             "version": 42,
             'version_timestamp': Decimal('1392487853.863673'),
             "plankton:name": u"TestImage\u2602",
             "plankton:container_format": "bare",
             "plankton:disk_format": "diskdump",
             "plankton:status": u"AVAILABLE"},
            {"read": []},
            u"4321-4321/\u2602/foo",
        )
        headers = deepcopy(required)
        headers["HTTP_X_IMAGE_META_IS_PUBLIC"] = True
        headers["HTTP_X_IMAGE_META_PROPERTY_KEY1"] = "val1"
        headers["HTTP_X_IMAGE_META_PROPERTY_KEY2"] = u"\u2601"
        response = self.post(IMAGES_URL, **headers)
        name, args, kwargs = backend().update_object_meta.mock_calls[-1]
        metadata = args[5]
        self.assertEqual(metadata["plankton:property:key1"], "val1")
        self.assertEqual(metadata["plankton:property:key2"], u"\u2601")
        self.assertSuccess(response)
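
The mocked backend returns version_timestamp as Decimal('1392487853.863673'), a Unix timestamp with sub-second precision, which the API renders as "2014-02-15 18:10:53". A sketch of that conversion (the exact formatting code used by synnefo is an assumption inferred from the asserted values):

from datetime import datetime
from decimal import Decimal

ts = Decimal('1392487853.863673')
dt = datetime.utcfromtimestamp(float(ts))  # sub-second digits are dropped by the format below
print(dt.strftime('%Y-%m-%d %H:%M:%S'))    # 2014-02-15 18:10:53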

Example 107

Project: ec2price Source File: collector.py
def collect(model, hours):
    row = model.progress.get_item(name='end_time')
    if row['timestamp'] is None:
        logging.debug('using initial window of -%d hours', hours)
        start_time = arrow.utcnow().replace(hours=-hours)
    else:
        start_time = arrow.get(row['timestamp'])
    logging.debug('start time: %s', start_time)

    end_time = arrow.utcnow()
    logging.debug('end time: %s', end_time)

    all_regions = set()
    all_product_descriptions = set()
    all_instance_types = set()
    all_instance_zones = set()

    session = botocore.session.get_session()
    ec2 = session.get_service('ec2')
    operation = ec2.get_operation('DescribeSpotPriceHistory')

    for region in ec2.region_names:
        if any(region.startswith(x) for x in _EXCLUDED_REGION_PREFIXES):
            continue
        all_regions.add(region)

        next_token = None
        while True:
            logging.debug('collecting spot prices from region: %s', region)
            endpoint = ec2.get_endpoint(region)
            if next_token:
                response, data = operation.call(
                    endpoint,
                    start_time=start_time.format(_FMT),
                    end_time=end_time.format(_FMT),
                    next_token=next_token,
                )
            else:
                response, data = operation.call(
                    endpoint,
                    start_time=start_time.format(_FMT),
                )
            next_token = data.get('NextToken')
            logging.debug('next_token: %s', next_token)
            spot_data = data.get('SpotPriceHistory', [])

            #conn = boto.ec2.connect_to_region(r.name)
            #logging.debug('getting spot prices for region: %s', r.name)
            #data = conn.get_spot_price_history(start_time=start_time)

            logging.debug('saving %d spot prices for region: %s',
                          len(spot_data), region)
            with model.spot_prices.batch_write() as batch:
                for d in spot_data:
                    all_product_descriptions.add(d['ProductDescription'])
                    all_instance_types.add(d['InstanceType'])
                    all_instance_zones.add((
                        d['ProductDescription'],
                        d['InstanceType'],
                        d['AvailabilityZone'],
                    ))
                    batch.put_item(data={
                        'instance_zone_id': ':'.join([
                            d['ProductDescription'],
                            d['InstanceType'],
                            d['AvailabilityZone'],
                        ]),
                        'timestamp': arrow.get(d['Timestamp']).timestamp,
                        'price': decimal.Decimal(str(d['SpotPrice'])),
                    })
            if not next_token:
                break

    logging.debug('saving %d regions', len(all_regions))
    with model.regions.batch_write() as batch:
        for i in all_regions:
            batch.put_item(data={'region': i})

    logging.debug('saving %d product_descriptions',
                  len(all_product_descriptions))
    with model.product_descriptions.batch_write() as batch:
        for i in all_product_descriptions:
            batch.put_item(data={'product_description': i})

    logging.debug('saving %d instance_types', len(all_instance_types))
    with model.instance_types.batch_write() as batch:
        for i in all_instance_types:
            batch.put_item(data={'instance_type': i})

    logging.debug('saving %d instance_zones', len(all_instance_zones))
    with model.instance_zones.batch_write() as batch:
        for i in all_instance_zones:
            batch.put_item(data={
                'instance_id': ':'.join([i[0], i[1]]),
                'zone': i[2],
            })

    logging.debug('saving end_time')
    with model.progress.batch_write() as batch:
        batch.put_item(data={
            'name': 'end_time',
            'timestamp': end_time.timestamp,
        })
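
The spot price is stored as decimal.Decimal(str(d['SpotPrice'])), presumably because the DynamoDB layer behind batch_write expects Decimal rather than binary floats; round-tripping through str() keeps the stored price exact. A minimal sketch:

import decimal

spot_price = '0.0270'                     # SpotPrice arrives as a string in the API response
price = decimal.Decimal(str(spot_price))  # str() also guards against float inputs
print(price)                              # 0.0270 -- the trailing zero is preserved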

Example 108

Project: hellolily Source File: import_deals.py
    def _create_deal(self, values):

        try:
            # Create deal
            deal_kwargs = dict()
            for column, value in values.items():
                if value and column in self.column_attribute_mapping:
                    attribute = self.column_attribute_mapping.get(column)
                    # Set created date to original created date in sugar.
                    if attribute == 'created':
                        value = timezone.make_aware(
                            datetime.strptime(str(value), "%d-%m-%Y %H.%M"),
                            timezone.get_current_timezone()
                        )
                    deal_kwargs[attribute] = value

            eenmalig = 0.00
            if values.get('Eenmalig') is not None:
                eenmalig = Decimal(values.get('Eenmalig').decode('utf-8').replace(u'\u20ac', '').replace(',', ''))

            maandelijks = 0.00
            if values.get('Maandelijks') is not None:
                maandelijks = Decimal(
                    values.get('Maandelijks').decode('utf-8').replace(u'\u20ac', '').replace(',', '')
                )

            hardware = 0.00
            if values.get('Hardware') is not None:
                hardware = Decimal(values.get('Hardware').decode('utf-8').replace(u'\u20ac', '').replace(',', ''))

            amount = 0.00
            if values.get('Opportunity Amount') is not None:
                amount = Decimal(
                    values.get('Opportunity Amount').decode('utf-8').replace(u'\u20ac', '').replace(',', '')
                )

            deal_kwargs['amount_once'] = (eenmalig + hardware) if (eenmalig > 0.00 or hardware > 0.00) else amount
            deal_kwargs['amount_recurring'] = maandelijks

            deal_kwargs['type'] = self.type_mapping.get(values.get('Type', None))
            deal_kwargs['stage'] = self.stage_mapping.get(values.get('Sales Stage', None))
            deal_kwargs['feedback_form_sent'] = values.get('Feedback Form Send?') == '1'
            deal_kwargs['currency'] = CURRENCY

            try:
                deal = Deal.objects.get(tenant_id=self.tenant_pk, import_id=deal_kwargs['import_id'])
                # Set logic here for what you want to import when rerunning the import
                # Check wiki for last import date to filter any changed deals after the import
                return
            except Deal.DoesNotExist:
                deal = Deal(tenant_id=self.tenant_pk)

                for k, v in deal_kwargs.items():
                    setattr(deal, k, v)

                deal.is_archived = True

                some_time_ago = timezone.make_aware(
                    (datetime.now() - timedelta(6 * 365 / 12)), timezone.get_current_timezone()
                )

                is_special = values.get('Sales Stage', '') == 'Special'
                if is_special and deal.created > some_time_ago:
                    deal.is_archived = False

                is_closed_won = values.get('Sales Stage', None) not in ('Closed Won', 'Closed Lost')

                if is_closed_won and deal.created > some_time_ago:
                    deal.is_archived = False

                user_id = values.get('Assigned User ID')
                if user_id and user_id in self.user_mapping:
                    try:
                        deal.assigned_to = LilyUser.objects.get(
                            pk=self.user_mapping[user_id],
                            tenant_id=self.tenant_pk
                        )
                    except LilyUser.DoesNotExist:
                        if user_id not in self.already_logged:
                            self.already_logged.add(user_id)
                            logger.warning(u'Assignee does not exist as a LilyUser. %s' % user_id)
                else:
                    # Only log when user_name not empty.
                    if user_id and user_id not in self.already_logged:
                        self.already_logged.add(user_id)
                        logger.warning(u'Assignee does not have a user mapping. %s' % user_id)

            try:
                deal.save()
            except Exception as e:
                logger.warning('cannot save deal:%s\n %s' % (e, values))

        except Exception as e:
            logger.warning('Error importing row:%s\n %s' % (e, values))
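
Each money column above is decoded, stripped of the euro sign and thousands separators, and handed to Decimal. A condensed sketch of that parsing step (the sample cell value is illustrative):

from decimal import Decimal

raw = u'\u20ac1,250.50'  # a euro amount as it might appear in the CSV export
amount = Decimal(raw.replace(u'\u20ac', '').replace(',', ''))
print(amount)            # 1250.50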

Example 109

Project: tastypie-queryset-client Source File: client.py
def model_gen(**configs):
    """ generate model

    :param slumber main_client:
    :param str model_name: resource name
    :param str endpoint: endpoint url
    :param str schema: schema url
    :param bool strict_field: strict field and convert value in field. ( default: True )
    :param Manager objects: Manager Class
    :param Client objects: Client Class
    """
    class Model(object):
        """ Inner Class
        """
        _client = getattr(configs.get("main_client"), configs.get("model_name"))
        _main_client = configs.get("main_client")
        _base_client = configs.get("base_client")
        _model_name = configs.get("model_name")
        _endpoint = configs.get("endpoint")
        _schema = configs.get("schema")
        _strict_field = configs.get("strict_field", True)
        _schema_store = _base_client.schema(_model_name)
        _base_url = _main_client._store["base_url"]
        _fields = dict()  # TODO: set field attribute
        objects = None

        def __init__(self, **kwargs):
            self._clear_fields()
            self._setattrs(**kwargs)  # TODO: LazyCall

        def __repr__(self):
            return "<{0}: {1}{2}>".format(self._model_name, self._endpoint,
                                          " " + str(self._fields) if self._fields else "")

        def _clear_fields(self, klass=None):
            c = klass or self
            for field in c._fields:
                c.__delattr__(field)
            c._fields = dict()

        def _setattrs(self, **kwargs):
            for field in kwargs:
                self.__setattr__(field, kwargs[field])
                if field not in self._fields:
                    raise FieldTypeError("'{0}' is an invalid keyword argument for this function"
                                         .format(field))

        def __setattr__(self, attr, value):
            self._setfield(attr, value)

        def _setfield(self, attr, value):
            if hasattr(self, "_schema_store"):
                if attr in self._schema_store["fields"]:
                    nullable = self._schema_store["fields"][attr]["nullable"]
                    blank = self._schema_store["fields"][attr]["blank"]
                    field_type = self._schema_store["fields"][attr]["type"]
                    check_type = False
                    err = ""
                    if self._strict_field is True:
                        try:
                            if (nullable or blank) and not value:
                                check_type = True
                            elif field_type == "string":
                                check_type = isinstance(value, (str, unicode))
                            elif field_type == "integer":
                                if isinstance(value, (str, unicode)):
                                    check_type = value.isdigit()
                                elif isinstance(value, int):
                                    check_type = True
                            elif field_type == "float":
                                if isinstance(value, float):
                                    check_type = True
                            elif field_type == "decimal":
                                value = decimal.Decimal(value)
                                check_type = isinstance(value, decimal.Decimal)
                            elif field_type == "datetime":
                                if isinstance(value, (str, unicode)):
                                    try:
                                        value = datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f")
                                    except ValueError:
                                        value = datetime.strptime(value, "%Y-%m-%dT%H:%M:%S")
                                check_type = isinstance(value, datetime)
                            elif field_type == "time":
                                check_type = True
                            elif field_type == "boolean":
                                check_type = True
                            if field_type == "related":
                                check_type = True
                        except Exception, err:
                            check_type = False
                        finally:
                            if check_type is not True:
                                raise FieldTypeError(
                                    "'{0}' is '{1}' type. ( Input '{2}:{3}' ) {4}"
                                        .format(attr, field_type, value, type(value).__name__, err))
                    self._fields[attr] = value  # set field
            super(Model, self).__setattr__(attr, value)

        def _get_field(self, field):
            if field in self._schema_store["fields"]:
                field_type = self._schema_store["fields"][field]["type"]
                value = self._fields[field]
                if self._strict_field is True:
                    try:
                        if field_type == "string":
                            pass
                        elif field_type == "integer":
                            pass   # input safe
                        elif field_type == "float":
                            pass   # input safe
                        elif field_type == "decimal":
                            pass   # input safe
                        elif field_type == "datetime":
                            value = value.isoformat()
                        elif field_type == "time":
                            pass
                        elif field_type == "boolean":
                            pass
                        else:
                            pass
                    except Exception:
                        if self._strict_field is True:
                            raise FieldTypeError(
                                "'{0}' is '{1}' type. ( Input '{2}:{3}' )"
                                    .format(field, field_type, value, type(value).__name__))
                if field_type == "related":
                    value = getattr(value, "resource_uri", value)
                    if self._schema_store["fields"][field]["related_type"] == "to_many":
                        if isinstance(value, (list, tuple)) is False:
                            value = [value]
                return value

        def _get_fields(self):
            fields = {}
            for field in self._fields:
                fields.update({field: self._get_field(field)})
            return fields

        @classmethod
        def clone(cls, model_name=None):
            """ create `model_name` model """
            return cls._base_client._model_gen(model_name or cls._model_name)

        @classmethod
        def schema(cls, *attrs):
            """

            * attrs example ::

                    >>> self.schema("fields")
                    # out fields schema
                    >>> self.schema("fields", "id")
                    # out id schema

            :param tuple attrs:
            :rtype: dict
            :return: model schema
            """
            if attrs:
                s = cls._schema_store
                for attr in attrs:
                    s = s[attr]
                return s
            else:
                return cls._schema_store

        def save(self):
            """ save

            :rtype: NoneType
            """
            if hasattr(self, "id"):
                self._client(self.id).put(self._get_fields())  # return bool
            else:
                self._client.post(self._get_fields())
                self._setattrs(**self._client._handle_redirect(self._client._))

        def delete(self):
            """ delete

            :rtype: NoneType
            """
            assert hasattr(self, "id") is True, "{0} object can't be deleted because its {1} attribute \
                is set to None.".format(self._model_name, self._schema_store["fields"]["id"]["type"])
            self._client(self.id).delete()
            self._clear_fields()

    Model.objects = configs.get("objects", Manager(Model))
    return Model
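
In the dispatch above, values whose schema type is "decimal" are passed through untouched ("input safe"), on the assumption they were validated on the way in. As a rough sketch of what such a per-type check can look like, here is a hypothetical validator; the names below are illustrative and not part of the library:

import decimal

# Hypothetical mapping from schema type names to Python types,
# mirroring the field_type dispatch in _get_field above.
TYPE_CHECKS = {
    'string': str,
    'integer': int,
    'float': float,
    'decimal': decimal.Decimal,
}

def is_valid(field_type, value):
    expected = TYPE_CHECKS.get(field_type)
    return expected is None or isinstance(value, expected)

print(is_valid('decimal', decimal.Decimal('1.5')))  # True
print(is_valid('decimal', 1.5))                     # False: a float is not a Decimal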

Example 110

Project: OCRmyPDF Source File: pageinfo.py
def _find_page_regular_images(page, pageinfo, contentsinfo):
    "Find images stored in XObject resources"

    try:
        page['/Resources']['/XObject']
    except KeyError:
        return
    for xobj in page['/Resources']['/XObject']:
        # PyPDF2 returns the keys as an iterator
        pdfimage = page['/Resources']['/XObject'][xobj]
        if pdfimage['/Subtype'] != '/Image':
            continue
        image = {}
        image['name'] = str(xobj)
        image['width'] = pdfimage['/Width']
        image['height'] = pdfimage['/Height']
        if '/BitsPerComponent' in pdfimage:
            image['bpc'] = pdfimage['/BitsPerComponent']
        else:
            image['bpc'] = 8

        # Fixme: this incorrectly treats explicit masks as stencil masks,
        # but it is good enough for now. Explicit masks have /ImageMask true
        # but are never invoked in the content stream; instead they are drawn
        # as a /Mask on other images. For our purposes the details of /Mask
        # will seldom matter.
        if '/ImageMask' in pdfimage:
            image['type'] = 'stencil' if pdfimage['/ImageMask'].value \
                            else 'image'
        else:
            image['type'] = 'image'
        if '/Filter' in pdfimage:
            filter_ = pdfimage['/Filter']
            if isinstance(filter_, pypdf.generic.ArrayObject):
                filter_ = filter_[0]
            image['enc'] = FRIENDLY_ENCODING.get(filter_, 'image')
        else:
            image['enc'] = 'image'
        if '/ColorSpace' in pdfimage:
            cs = pdfimage['/ColorSpace']
            if isinstance(cs, pypdf.generic.ArrayObject):
                cs = cs[0]
            image['color'] = FRIENDLY_COLORSPACE.get(cs, '-')
        else:
            image['color'] = 'jpx' if image['enc'] == 'jpx' else '?'

        image['comp'] = FRIENDLY_COMP.get(image['color'], '?')

        # Bit of a hack... infer grayscale if component count is uncertain
        # but encoding must be monochrome. This happens if a monochrome image
        # has an ICC profile attached. Better solution would be to examine
        # the ICC profile.
        if image['comp'] == '?' and image['enc'] in ('ccitt', 'jbig2'):
            image['comp'] = FRIENDLY_COMP['gray']

        image['dpi_w'] = image['dpi_h'] = 0

        for raster in contentsinfo.raster_settings:
            # Loop in case the same image is displayed multiple times on a page
            if raster.name != image['name']:
                continue

            if raster.stack_depth == 0 and _is_unit_square(raster.shorthand):
                # At least one PDF in the wild (and test suite) draws an image
                # when the graphics stack depth is 0, meaning that the image
                # gets drawn into a square of 1x1 PDF units (or 1/72",
                # or 0.35 mm).  The equivalent DPI will be >100,000.  Exclude
                # these from our DPI calculation for the page.
                continue

            dpi_w, dpi_h = _get_dpi(
                raster.shorthand, (image['width'], image['height']))

            # When the image is used multiple times, take the highest DPI
            # at which it is rendered
            image['dpi_w'] = max(dpi_w, image.get('dpi_w', 0))
            image['dpi_h'] = max(dpi_h, image.get('dpi_h', 0))

        DPI_PREC = Decimal('1.000')
        image['dpi_w'] = Decimal(image['dpi_w']).quantize(DPI_PREC)
        image['dpi_h'] = Decimal(image['dpi_h']).quantize(DPI_PREC)
        dpi = Decimal(image['dpi_w'] * image['dpi_h']).sqrt()
        image['dpi'] = dpi.quantize(DPI_PREC)
        yield image
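
Note the quantize() calls at the end: quantize rounds a Decimal to the exponent of its argument, so Decimal('1.000') pins the reported DPI to three decimal places, and sqrt() keeps the geometric mean in Decimal as well. A minimal standalone sketch (the DPI values are made up):

from decimal import Decimal

DPI_PREC = Decimal('1.000')      # template: three decimal places
dpi_w = Decimal('299.99989')     # hypothetical raw values
dpi_h = Decimal('300.00011')
dpi = (dpi_w * dpi_h).sqrt().quantize(DPI_PREC)
print(dpi)                       # 300.000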

Example 111

Project: mythbox Source File: jelly.py
    def jelly(self, obj):
        if isinstance(obj, Jellyable):
            preRef = self._checkMutable(obj)
            if preRef:
                return preRef
            return obj.jellyFor(self)
        objType = type(obj)
        if self.taster.isTypeAllowed(qual(objType)):
            # "Immutable" Types
            if ((objType is StringType) or
                (objType is IntType) or
                (objType is LongType) or
                (objType is FloatType)):
                return obj
            elif objType is MethodType:
                return ["method",
                        obj.im_func.__name__,
                        self.jelly(obj.im_self),
                        self.jelly(obj.im_class)]

            elif UnicodeType and objType is UnicodeType:
                return ['unicode', obj.encode('UTF-8')]
            elif objType is NoneType:
                return ['None']
            elif objType is FunctionType:
                name = obj.__name__
                return ['function', str(pickle.whichmodule(obj, obj.__name__))
                        + '.' +
                        name]
            elif objType is ModuleType:
                return ['module', obj.__name__]
            elif objType is BooleanType:
                return ['boolean', obj and 'true' or 'false']
            elif objType is datetime.datetime:
                if obj.tzinfo:
                    raise NotImplementedError(
                        "Currently can't jelly datetime objects with tzinfo")
                return ['datetime', '%s %s %s %s %s %s %s' % (
                    obj.year, obj.month, obj.day, obj.hour,
                    obj.minute, obj.second, obj.microsecond)]
            elif objType is datetime.time:
                if obj.tzinfo:
                    raise NotImplementedError(
                        "Currently can't jelly datetime objects with tzinfo")
                return ['time', '%s %s %s %s' % (obj.hour, obj.minute,
                                                 obj.second, obj.microsecond)]
            elif objType is datetime.date:
                return ['date', '%s %s %s' % (obj.year, obj.month, obj.day)]
            elif objType is datetime.timedelta:
                return ['timedelta', '%s %s %s' % (obj.days, obj.seconds,
                                                   obj.microseconds)]
            elif objType is ClassType or issubclass(objType, type):
                return ['class', qual(obj)]
            elif decimal is not None and objType is decimal.Decimal:
                return self.jelly_decimal(obj)
            else:
                preRef = self._checkMutable(obj)
                if preRef:
                    return preRef
                # "Mutable" Types
                sxp = self.prepare(obj)
                if objType is ListType:
                    sxp.extend(self._jellyIterable(list_atom, obj))
                elif objType is TupleType:
                    sxp.extend(self._jellyIterable(tuple_atom, obj))
                elif objType in DictTypes:
                    sxp.append(dictionary_atom)
                    for key, val in obj.items():
                        sxp.append([self.jelly(key), self.jelly(val)])
                elif (_set is not None and objType is set or
                      objType is _sets.Set):
                    sxp.extend(self._jellyIterable(set_atom, obj))
                elif (_set is not None and objType is frozenset or
                      objType is _sets.ImmutableSet):
                    sxp.extend(self._jellyIterable(frozenset_atom, obj))
                else:
                    className = qual(obj.__class__)
                    persistent = None
                    if self.persistentStore:
                        persistent = self.persistentStore(obj, self)
                    if persistent is not None:
                        sxp.append(persistent_atom)
                        sxp.append(persistent)
                    elif self.taster.isClassAllowed(obj.__class__):
                        sxp.append(className)
                        if hasattr(obj, "__getstate__"):
                            state = obj.__getstate__()
                        else:
                            state = obj.__dict__
                        sxp.append(self.jelly(state))
                    else:
                        self.unpersistable(
                            "instance of class %s deemed insecure" %
                            qual(obj.__class__), sxp)
                return self.preserve(obj, sxp)
        else:
            if objType is InstanceType:
                raise InsecureJelly("Class not allowed for instance: %s %s" %
                                    (obj.__class__, obj))
            raise InsecureJelly("Type not allowed for object: %s %s" %
                                (objType, obj))
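
The `decimal is not None` guard before the type test reflects that jelly treats decimal as an optionally available module (it did not exist before Python 2.4). The pattern in isolation, as a minimal sketch:

try:
    import decimal
except ImportError:
    # Very old Pythons (< 2.4) shipped without the decimal module.
    decimal = None

def is_decimal(obj):
    # Exact type check, mirroring the jelly() dispatch above.
    return decimal is not None and type(obj) is decimal.Decimal

if decimal is not None:
    print(is_decimal(decimal.Decimal('1')))   # True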

Example 112

Project: pynode Source File: ChainDb.py
	def newblock_txs(self):
		txlist = []
		for tx in self.mempool.pool.itervalues():

			# query finalized, non-coinbase mempool tx's
			if tx.is_coinbase() or not tx.is_final():
				continue

			# iterate through inputs, calculate total input value
			valid = True
			nValueIn = 0
			nValueOut = 0
			dPriority = Decimal(0)

			for tin in tx.vin:
				in_tx = self.gettx(tin.prevout.hash)
				if (in_tx is None or
				    tin.prevout.n >= len(in_tx.vout)):
					valid = False
				else:
					v = in_tx.vout[tin.prevout.n].nValue
					nValueIn += v
					dPriority += Decimal(v * 1)

			if not valid:
				continue

			# iterate through outputs, calculate total output value
			for txout in tx.vout:
				nValueOut += txout.nValue

			# calculate fees paid, if any
			tx.nFeesPaid = nValueIn - nValueOut
			if tx.nFeesPaid < 0:
				continue

			# calculate fee-per-KB and priority
			tx.ser_size = len(tx.serialize())

			dPriority /= Decimal(tx.ser_size)

			tx.dFeePerKB = (Decimal(tx.nFeesPaid) /
					(Decimal(tx.ser_size) / Decimal(1000)))
			if tx.dFeePerKB < Decimal(50000):
				tx.dFeePerKB = Decimal(0)
			tx.dPriority = dPriority

			txlist.append(tx)

		# sort list by fee-per-kb, then priority
		sorted_txlist = sorted(txlist, cmp=tx_blk_cmp, reverse=True)

		# build final list of transactions.  thanks to sort
		# order above, we add TX's to the block in the
		# highest-fee-first order.  free transactions are
		# then appended in order of priority, until
		# free_bytes is exhausted.
		txlist = []
		txlist_bytes = 0
		free_bytes = 50000
		while len(sorted_txlist) > 0:
			tx = sorted_txlist.pop()
			if txlist_bytes + tx.ser_size > (900 * 1000):
				continue

			if tx.dFeePerKB > 0:
				txlist.append(tx)
				txlist_bytes += tx.ser_size
			elif free_bytes >= tx.ser_size:
				txlist.append(tx)
				txlist_bytes += tx.ser_size
				free_bytes -= tx.ser_size
		
		return txlist
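
The fee-per-KB arithmetic stays in Decimal so that integer satoshi amounts divide exactly instead of drifting through floats. Isolated, the same computation looks like this (the 50000 threshold mirrors the code above):

from decimal import Decimal

def fee_per_kb(fees_paid, ser_size):
    # Both arguments are ints; Decimal division keeps the rate exact.
    rate = Decimal(fees_paid) / (Decimal(ser_size) / Decimal(1000))
    # Rates under the threshold are treated as free, as above.
    return Decimal(0) if rate < Decimal(50000) else rate

print(fee_per_kb(20000, 250))   # 80000
print(fee_per_kb(10000, 250))   # 0 (40000 is below the threshold)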

Example 113

Project: mednet Source File: emitters.py
    def construct(self):
        """
        Recursively serializes a lot of types; in cases
        where it doesn't recognize the type, it falls
        back to Django's `smart_unicode`.
        
        Returns `dict`.
        """
        def _any(thing, fields=()):
            """
            Dispatch, all types are routed through here.
            """
            ret = None
            
            if isinstance(thing, QuerySet):
                ret = _qs(thing, fields=fields)
            elif isinstance(thing, (tuple, list)):
                ret = _list(thing)
            elif isinstance(thing, dict):
                ret = _dict(thing)
            elif isinstance(thing, decimal.Decimal):
                ret = str(thing)
            elif isinstance(thing, Model):
                ret = _model(thing, fields=fields)
            elif isinstance(thing, HttpResponse):
                pass
                # raise HttpStatusCode(thing.content, code=thing.status_code)
            elif isinstance(thing, types.FunctionType):
                if not inspect.getargspec(thing)[0]:
                    ret = _any(thing())
            elif isinstance(thing, GEOSGeometry):
                ret = thing.geojson
            else:
                ret = smart_unicode(thing, strings_only=True)

            return ret

        def _fk(data, field):
            """
            Foreign keys.
            """
            return _any(getattr(data, field.name))
        
        def _related(data, fields=()):
            """
            Foreign keys.
            """
            return [ _model(m, fields) for m in data.iterator() ]
        
        def _m2m(data, field, fields=()):
            """
            Many to many (re-route to `_model`.)
            """
            return [ _model(m, fields) for m in getattr(data, field.name).iterator() ]
        
        def _model(data, fields=()):
            """
            Models. Will respect the `fields` and/or
            `exclude` on the handler (see `typemapper`.)
            """
            ret = { }
            handler = self.in_typemapper(type(data), self.anonymous)
            get_absolute_uri = False
            
            if handler or fields:
                v = lambda f: getattr(data, f.attname)

                if not fields:
                    """
                    Fields were not specified, so try to find the correct
                    version in the typemapper we were sent.
                    """
                    mapped = self.in_typemapper(type(data), self.anonymous)
                    get_fields = set(mapped.fields)
                    exclude_fields = set(mapped.exclude).difference(get_fields)

                    if 'absolute_uri' in get_fields:
                        get_absolute_uri = True
                
                    if not get_fields:
                        get_fields = set([ f.attname.replace("_id", "", 1)
                            for f in data._meta.fields ])
                
                    # sets can be negated.
                    for exclude in exclude_fields:
                        if isinstance(exclude, basestring):
                            get_fields.discard(exclude)
                            
                        elif isinstance(exclude, re._pattern_type):
                            for field in get_fields.copy():
                                if exclude.match(field):
                                    get_fields.discard(field)
                                    
                else:
                    get_fields = set(fields)

                met_fields = self.method_fields(handler, get_fields)

                for f in data._meta.local_fields:
                    if f.serialize and f.attname not in met_fields:
                        if not f.rel:
                            if f.attname in get_fields:
                                ret[f.attname] = _any(v(f))
                                get_fields.remove(f.attname)
                        else:
                            if f.attname[:-3] in get_fields:
                                ret[f.name] = _fk(data, f)
                                get_fields.remove(f.name)
                
                for mf in data._meta.many_to_many:
                    if mf.serialize and mf.attname not in met_fields:
                        if mf.attname in get_fields:
                            ret[mf.name] = _m2m(data, mf)
                            get_fields.remove(mf.name)
                
                # try to get the remainder of fields
                for maybe_field in get_fields:
                    
                    if isinstance(maybe_field, (list, tuple)):
                        model, fields = maybe_field
                        inst = getattr(data, model, None)

                        if inst:
                            if hasattr(inst, 'all'):
                                ret[model] = _related(inst, fields)
                            elif callable(inst):
                                if len(inspect.getargspec(inst)[0]) == 1:
                                    ret[model] = _any(inst(), fields)
                            else:
                                ret[model] = _model(inst, fields)

                    elif maybe_field in met_fields:
                        # Overriding normal field which has a "resource method"
                        # so you can alter the contents of certain fields without
                        # using different names.
                        ret[maybe_field] = _any(met_fields[maybe_field](data))

                    else:                    
                        maybe = getattr(data, maybe_field, None)
                        if maybe:
                            if isinstance(maybe, (int, basestring)):
                                ret[maybe_field] = _any(maybe)
                            elif callable(maybe):
                                if len(inspect.getargspec(maybe)[0]) == 1:
                                    ret[maybe_field] = _any(maybe())
                        else:
                            handler_f = getattr(handler or self.handler, maybe_field, None)

                            if handler_f:
                                ret[maybe_field] = _any(handler_f(data))

            else:
                for f in data._meta.fields:
                    ret[f.attname] = _any(getattr(data, f.attname))
                
                fields = dir(data.__class__) + ret.keys()
                add_ons = [k for k in dir(data) if k not in fields]
                
                for k in add_ons:
                    ret[k] = _any(getattr(data, k))
            
            # resource uri
            if self.in_typemapper(type(data), self.anonymous):
                handler = self.in_typemapper(type(data), self.anonymous)
                if hasattr(handler, 'resource_uri'):
                    url_id, fields = handler.resource_uri()
                    ret['resource_uri'] = permalink( lambda: (url_id, 
                        (getattr(data, f) for f in fields) ) )()
            
            if hasattr(data, 'get_api_url') and 'resource_uri' not in ret:
                try: ret['resource_uri'] = data.get_api_url()
                except: pass
            
            # absolute uri
            if hasattr(data, 'get_absolute_url') and get_absolute_uri:
                try: ret['absolute_uri'] = data.get_absolute_url()
                except: pass
            
            return ret
        
        def _qs(data, fields=()):
            """
            Querysets.
            """
            return [ _any(v, fields) for v in data ]
                
        def _list(data):
            """
            Lists.
            """
            return [ _any(v) for v in data ]
            
        def _dict(data):
            """
            Dictionaries.
            """
            return dict([ (k, _any(v)) for k, v in data.iteritems() ])
            
        # Kickstart the serializin'.
        return _any(self.data, self.fields)
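
Serializing Decimal by way of str(thing), as the dispatcher above does, avoids the lossy detour through float and keeps the textual value exact. For comparison:

import decimal
import json

price = decimal.Decimal('19.99')
print(json.dumps(str(price)))     # "19.99" -- exact, as the emitter emits it
print(json.dumps(float(price)))   # 19.99, via a binary float round-trip

The str() conversion is also what keeps the JSON encoder happy: json.dumps raises TypeError on a raw Decimal.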

Example 114

Project: pyFxTrader Source File: cli.py
@click.command()
@click.option('--instrument', '-i', 'instruments',
              default=DEFAULT_INSTRUMENTS,
              multiple=True,
              type=InstrumentParamType())
@click.option('--mode', '-m', 'mode', default='backtest',
              type=click.Choice(['backtest', 'live']))
@click.option('--start', '-s', 'start_date',)
@click.option('--end', '-e', 'end_date',)
@click.option('--log', '-l', 'log_level',
              default='info',
              type=click.Choice(['info', 'debug', 'warning',]))

def main(instruments, mode, log_level, start_date=None, end_date=None):
    """
    Algorithmic trading tool.
    """

    # Make urllib3 logger more calm
    urllib3_logger = logging.getLogger('urllib3')
    urllib3_logger.setLevel(logging.CRITICAL)

    numeric_level = getattr(logging, log_level.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % log_level)
    log_filename = "logs/pyfx_debug_{}-{}.log".format(strftime("%Y_%m_%d-%H_%M"), settings.ENVIRONMENT)
    logging.basicConfig(filename=log_filename, level=logging.DEBUG)
    logger = logging.getLogger('pyFx')

    formatter = logging.Formatter("[%(asctime)s/%(levelname)s] %(funcName)s():%(lineno)d\t%(message)s")
    handler = RainbowLoggingHandler(sys.stdout, color_funcName=('black', 'yellow', True))
    handler.setFormatter(formatter)
    handler.setLevel(numeric_level)
    logger.addHandler(handler)

    start_date_ = start_date if start_date else settings.BACKTEST_START
    end_date_ = end_date if end_date else settings.BACKTEST_END
    BACKTEST_START = parser.parse(start_date_).replace(tzinfo=pytz.utc)
    BACKTEST_END = parser.parse(end_date_).replace(tzinfo=pytz.utc)

    if mode == 'backtest':
        api = oandapy.API(
            environment=settings.ENVIRONMENT,
            access_token=settings.ACCESS_TOKEN,
        )
        broker = OandaBacktestBroker(
            api=api,
            account_id=settings.ACCOUNT_ID,
            initial_balance=decimal.Decimal(5000))

        # Oanda 20:00, Local: 22:00, DailyFx: 16:00
        clock = SimulatedClock(
            start=BACKTEST_START,
            stop=BACKTEST_END,
            interval=settings.CLOCK_INTERVAL,
        )

    elif mode == 'live':
        api = oandapy.API(
            environment=settings.ENVIRONMENT,
            access_token=settings.ACCESS_TOKEN,
        )
        clock = IntervalClock(interval=settings.CLOCK_INTERVAL)
        broker = OandaRealtimeBroker(api=api, account_id=settings.ACCOUNT_ID)
    else:
        raise NotImplementedError()

    # TODO Optimize load of instrument info
    instrument_list = set(instruments)
    for inst in instrument_list:
        inst.load(broker)
    # TODO We have to be able to instantiate strategies with custom args
    strategies = [settings.STRATEGY(instrument)
                  for instrument in instrument_list]
    if mode == 'backtest':
        broker.init_backtest(BACKTEST_START, BACKTEST_END, strategies)
        pf = Portfolio(broker, mode='backtest')
    else:
        pf = Portfolio(broker, mode='live')
    controller = Controller(clock, broker, pf, strategies)
    controller.run_until_stopped()
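
The broker balance is seeded with decimal.Decimal(5000): construction from an int (or a string) is exact, whereas construction from a float drags the binary representation error along. A quick comparison:

import decimal

print(decimal.Decimal(5000))     # 5000 (exact)
print(decimal.Decimal('0.10'))   # 0.10 (exact)
print(decimal.Decimal(0.10))     # 0.1000000000000000055511151231257827021181583404541015625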

Example 115

Project: disco-dop Source File: eval.py
Function: init
	def __init__(self, n, gtree, gsent, ctree, csent, param):
		"""Construct a pair of gold and candidate trees for evaluation."""
		self.n = n
		self.param = param
		self.csentorig, self.gsentorig = csent, gsent
		self.csent, self.gsent = csent[:], gsent[:]
		self.cpos, self.gpos = sorted(ctree.pos()), sorted(gtree.pos())
		self.lengpos = sum(1 for _, b in self.gpos
				if b not in self.param['DELETE_LABEL_FOR_LENGTH'])
		grootpos = {child[0] for child in gtree
				if child and isinstance(child[0], int)}
		# massage the data (in-place modifications)
		self.ctree = transform(ctree, self.csent, self.cpos,
				alignsent(self.csent, self.gsent, dict(self.gpos)),
				self.param, grootpos)
		self.gtree = transform(gtree, self.gsent, self.gpos,
				dict(self.gpos), self.param, grootpos)
		if len(self.csent) != len(self.gsent):
			raise ValueError('sentence length mismatch. sents:\n%s\n%s' % (
					' '.join(self.csent), ' '.join(self.gsent)))
		if self.csent != self.gsent:
			raise ValueError('candidate & gold sentences do not match:\n'
					'%r // %r' % (' '.join(csent), ' '.join(gsent)))
		self.cbrack = bracketings(self.ctree, self.param['LABELED'],
				self.param['DELETE_LABEL'], self.param['DISC_ONLY'])
		self.gbrack = bracketings(self.gtree, self.param['LABELED'],
				self.param['DELETE_LABEL'], self.param['DISC_ONLY'])
		self.lascore = self.ted = self.denom = Decimal('nan')
		self.cdep = self.gdep = ()
		self.pgbrack = Counter()
		self.pcbrack = Counter()
		self.grule = Counter()
		self.crule = Counter()
		# collect the function tags for correct bracketings & POS tags
		self.candfun = Counter((bracketing(a), b)
				for a in self.ctree.subtrees()
					for b in functions(a)
					if bracketing(a) in self.gbrack or (
						a and isinstance(a[0], int)
						and self.gpos[a[0]] == a.label))
		self.goldfun = Counter((bracketing(a), b)
				for a in self.gtree.subtrees()
					for b in functions(a)
					if bracketing(a) in self.cbrack or (
						a and isinstance(a[0], int)
						and self.cpos[a[0]] == a.label))
		if not self.gpos:
			return  # avoid 'sentences' with only punctuation.
		if self.param['LA']:
			self.lascore = leafancestor(self.gtree, self.ctree,
					self.param['DELETE_LABEL'])
		if self.param['TED']:
			self.ted, self.denom = treedisteval(self.gtree, self.ctree,
				includeroot=self.gtree.label not in self.param['DELETE_LABEL'])
		if self.param['DEP']:
			self.cdep = dependencies(self.ctree)
			self.gdep = dependencies(self.gtree)
		assert self.lascore != 1 or self.gbrack == self.cbrack, (
				'leaf ancestor score 1.0 but no exact match: (bug?)')
		self.pgbrack = parentedbracketings(self.gtree, labeled=True,
				dellabel=self.param['DELETE_LABEL'],
				disconly=self.param['DISC_ONLY'])
		self.pcbrack = parentedbracketings(self.ctree, labeled=True,
				dellabel=self.param['DELETE_LABEL'],
				disconly=self.param['DISC_ONLY'])
		self.grule = Counter((node.bitset, rule)
				for node, rule in zip(self.gtree.subtrees(),
				grammar.lcfrsproductions(self.gtree, self.gsent)))
		self.crule = Counter((node.bitset, rule)
				for node, rule in zip(self.ctree.subtrees(),
				grammar.lcfrsproductions(self.ctree, self.csent)))
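
Decimal('nan') serves here as a sentinel for scores that were never computed. Like float NaN it compares unequal to everything, including itself, which is what lets the `self.lascore != 1` assertion above pass for unevaluated pairs:

from decimal import Decimal

nan = Decimal('nan')
print(nan != 1)        # True: NaN compares unequal to everything
print(nan == nan)      # False, even against itself
print(nan.is_nan())    # True: the reliable way to detect the sentinel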

Example 116

Project: django-bitcoin Source File: tasks.py
@task()
def check_integrity():
    from django_bitcoin.models import Wallet, BitcoinAddress, WalletTransaction, DepositTransaction
    from django_bitcoin.utils import bitcoind
    from django.db.models import Avg, Max, Min, Sum
    from decimal import Decimal

    import sys
    from cStringIO import StringIO
    backup = sys.stdout
    sys.stdout = StringIO()

    bitcoinaddress_sum = BitcoinAddress.objects.filter(active=True)\
        .aggregate(Sum('least_received_confirmed'))['least_received_confirmed__sum'] or Decimal(0)
    print "Total received, sum", bitcoinaddress_sum
    transaction_wallets_sum = WalletTransaction.objects.filter(from_wallet__id__gt=0, to_wallet__id__gt=0)\
        .aggregate(Sum('amount'))['amount__sum'] or Decimal(0)
    print "Total transactions, sum", transaction_wallets_sum
    transaction_out_sum = WalletTransaction.objects.filter(from_wallet__id__gt=0)\
        .exclude(to_bitcoinaddress="")\
        .aggregate(Sum('amount'))['amount__sum'] or Decimal(0)
    print "Total outgoing, sum", transaction_out_sum
    # for x in WalletTransaction.objects.filter(from_wallet__id__gt=0, to_wallet__isnull=True, to_bitcoinaddress=""):
    #   print x.amount, x.created_at
    fee_sum = WalletTransaction.objects.filter(from_wallet__id__gt=0, to_wallet__isnull=True, to_bitcoinaddress="")\
        .aggregate(Sum('amount'))['amount__sum'] or Decimal(0)
    print "Fees, sum", fee_sum
    print "DB balance", (bitcoinaddress_sum - transaction_out_sum - fee_sum)
    print "----"
    bitcoind_balance = bitcoind.bitcoind_api.getbalance()
    print "Bitcoind balance", bitcoind_balance
    print "----"
    print "Wallet quick check"
    total_sum = Decimal(0)
    for w in Wallet.objects.filter(last_balance__lt=0):
        if w.total_balance()<0:
            bal = w.total_balance()
            # print w.id, bal
            total_sum += bal
    print "Negatives:", Wallet.objects.filter(last_balance__lt=0).count(), "Amount:", total_sum
    print "Migration check"
    tot_received = WalletTransaction.objects.filter(from_wallet=None).aggregate(Sum('amount'))['amount__sum'] or Decimal(0)
    tot_received_bitcoinaddress = BitcoinAddress.objects.filter(migrated_to_transactions=True)\
        .aggregate(Sum('least_received_confirmed'))['least_received_confirmed__sum'] or Decimal(0)
    tot_received_unmigrated = BitcoinAddress.objects.filter(migrated_to_transactions=False)\
        .aggregate(Sum('least_received_confirmed'))['least_received_confirmed__sum'] or Decimal(0)
    if tot_received != tot_received_bitcoinaddress:
        print "wrong total receive amount! "+str(tot_received)+", "+str(tot_received_bitcoinaddress)
    print "Total " + str(tot_received) + " BTC deposits migrated, unmigrated " + str(tot_received_unmigrated) + " BTC"
    print "Migration check #2"
    dts = DepositTransaction.objects.filter(address__migrated_to_transactions=False).exclude(transaction=None)
    if dts.count() > 0:
        print "Illegal transaction!", dts
    if WalletTransaction.objects.filter(from_wallet=None, deposit_address=None).count() > 0:
        print "Illegal deposit transactions!"
    print "Wallet check"
    for w in Wallet.objects.filter(last_balance__gt=0):
        lb = w.last_balance
        tb_sql = w.total_balance_sql()
        tb = w.total_balance()
        if lb != tb or w.last_balance != tb or tb != tb_sql:
            print "Wallet balance error!", w.id, lb, tb_sql, tb
            print w.sent_transactions.all().count()
            print w.received_transactions.all().count()
            print w.sent_transactions.all().aggregate(Max('created_at'))['created_at__max']
            print w.received_transactions.all().aggregate(Max('created_at'))['created_at__max']
            # Wallet.objects.filter(id=w.id).update(last_balance=w.total_balance_sql())
    # print w.created_at, w.sent_transactions.all(), w.received_transactions.all()
        # if random.random() < 0.001:
        #     sleep(1)
    print "Address check"
    for ba in BitcoinAddress.objects.filter(least_received_confirmed__gt=0, migrated_to_transactions=True):
        dts = DepositTransaction.objects.filter(address=ba, wallet=ba.wallet)
        s = dts.aggregate(Sum('amount'))['amount__sum'] or Decimal(0)
        if s != ba.least_received:
            print "DepositTransaction error", ba.address, ba.least_received, s
            print "BitcoinAddress check"
    for ba in BitcoinAddress.objects.filter(migrated_to_transactions=True):
        dts = ba.deposittransaction_set.filter(address=ba, confirmations__gte=settings.BITCOIN_MINIMUM_CONFIRMATIONS)
        deposit_sum = dts.aggregate(Sum('amount'))['amount__sum'] or Decimal(0)
        wt_sum = WalletTransaction.objects.filter(deposit_address=ba).aggregate(Sum('amount'))['amount__sum'] or Decimal(0)
        if wt_sum != deposit_sum or ba.least_received_confirmed != deposit_sum:
            print "Bitcoinaddress integrity error!", ba.address, deposit_sum, wt_sum, ba.least_received_confirmed
        # if random.random() < 0.001:
        #     sleep(1)

    integrity_test_output = sys.stdout.getvalue() # release output
    # ####

    sys.stdout.close()  # close the stream
    sys.stdout = backup # restore original stdout
    mail_admins("Integrity check", integrity_test_output)
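
One pattern repeats throughout this task: Django's aggregate() yields None on an empty queryset, so every sum is coerced with `or Decimal(0)` before any arithmetic. Stripped of the ORM, it is just:

from decimal import Decimal

def coalesce(aggregated_sum):
    # aggregated_sum stands in for qs.aggregate(Sum(...))['...__sum']
    return aggregated_sum or Decimal(0)

print(coalesce(None) + Decimal('1.5'))            # 1.5
print(coalesce(Decimal('2.5')) + Decimal('1.5'))  # 4.0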

Example 117

Project: sikteeri Source File: pdf.py
    def createData(self, cycle, bill=None, payments=None):
        # TODO: use Django SHORT_DATE_FORMAT
        membercontact = cycle.membership.get_billing_contact()

        # Calculate vat; copied from models.Bill#render_as_text(self)
        vat = Decimal(cycle.get_vat_percentage()) / Decimal(100)
        if self.__type__ == 'reminder':
            amount_paid = cycle.amount_paid()
            sum = cycle.sum - amount_paid
            non_vat_amount = sum / (Decimal(1) + vat)
        else:
            sum = cycle.sum
            non_vat_amount = (cycle.sum / (Decimal(1) + vat))

        # Select due date
        if self.__type__ == 'reminder':
            due_date = u"HETI"
        elif bill:
            due_date = bill.due_date.strftime("%d.%m.%Y")
        else:
            due_date = datetime.now() + timedelta(days=settings.BILL_DAYS_TO_DUE)
            due_date = due_date.strftime("%d.%m.%Y")

        lineitems = []
        # ['1', 'Jäsenmaksu', '04.05.2010 - 04.05.2011', '32.74 €','7.26 €','40.00 €']
        cycle_start_date = cycle.start.strftime('%d.%m.%Y')
        cycle_end_date = cycle.end_date().strftime('%d.%m.%Y')
        lineitems.append(["1",
                      u"Jäsenmaksu",
                      u"%s - %s" % (cycle_start_date, cycle_end_date),
                      u"%s €" % locale.format("%.2f", cycle.sum / (Decimal(1) + vat)),
                      u"%s %%" % locale.format("%d", cycle.get_vat_percentage()),
                      u"%s €" % locale.format("%.2f", vat * non_vat_amount),
                      u"%s €" % locale.format("%.2f", cycle.sum)])
        # Note any payments attached
        if self.__type__ == 'reminder' and amount_paid > 0:
            lineitems.append([
                "2",
                "Maksuja huomioitu yht.",
                "",  # start-end
                "",  # amount
                "",  # vat-percentage
                "",  # vat amount
                "%s €" % locale.format("%.2f", -amount_paid),  # total amount
                ])

        first_bill = cycle.first_bill()
        if bill:
            bill_id = bill.id
        elif first_bill:
            bill_id = first_bill.id
        else:
            bill_id = None
        if bill:
            date = bill.created
        else:
            date = datetime.now()
        if payments:
            latest_payment_date = payments.latest_payment_date()
            if latest_payment_date:
                latest_payments = min([payments.latest_payment_date(), datetime.now()])
            else:
                latest_payments = datetime(year=2003, month=1, day=1)
        else:
            latest_payments = datetime.now()
        self.data = {'name': cycle.membership.name(),
                'address': membercontact.street_address,
                'postal_code':membercontact.postal_code,
                'postal_office':membercontact.post_office,
                'date': date.strftime("%d.%m.%Y"),
                'latest_payment_date': latest_payments.strftime('%d.%m.%Y'),
                'member_id': cycle.membership.id,
                'due_date': due_date,
                'email': membercontact.email,
                'bill_id': bill_id,
                'vat': vat,
                'sum': sum,
                'pretty_sum': locale.format('%.2f', sum),
                'notify_period': '%d vrk' % (settings.REMINDER_GRACE_DAYS,),
                'lineitems': lineitems,
                'reference_number': group_reference(cycle.reference_number)
        }
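
The VAT split works backwards from a VAT-inclusive sum: dividing by Decimal(1) + vat yields the net amount, and vat * net recovers the tax share. A worked miniature with made-up numbers:

from decimal import Decimal

vat = Decimal(24) / Decimal(100)        # 24 % as a Decimal fraction
total = Decimal('40.00')                # VAT-inclusive sum
net = total / (Decimal(1) + vat)        # net amount, 28 significant digits
tax = (vat * net).quantize(Decimal('0.01'))
print(net.quantize(Decimal('0.01')))    # 32.26
print(tax)                              # 7.74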

Example 118

Project: karaage Source File: views.py
@usage_required
def index(request, machine_category_id):
    machine_category = get_object_or_404(
        MachineCategory, pk=machine_category_id)
    mc_list = MachineCategory.objects.exclude(id__exact=machine_category_id)

    result = progress(request)
    if result is not None:
        return result

    start, end = get_date_range(request)

    result = gen_cache_for_machine_category(
        request, start, end, machine_category)
    if result is not None:
        return render(
            template_name='kgusage/progress.html',
            context={'task_id': result.task_id},
            request=request)

    show_zeros = True

    institute_list = Institute.active.all()
    i_list = []
    m_list = []

    mc_cache = usage.get_machine_category_usage(machine_category, start, end)
    total = mc_cache.cpu_time
    total_jobs = mc_cache.no_jobs
    available_time = mc_cache.available_time
    total_time = ((end - start).days + 1) * 24 * 60 * 60
    avg_cpus = available_time / total_time

    for m_cache in models.MachineCache.objects.filter(
            machine__category=machine_category,
            date=datetime.date.today(), start=start, end=end):
        m = m_cache.machine
        time = m_cache.cpu_time
        jobs = m_cache.no_jobs
        m_list.append({'machine': m, 'usage': time, 'jobs': jobs})

    for i_cache in models.InstituteCache.objects.filter(
            machine_category=machine_category,
            date=datetime.date.today(), start=start, end=end):
        i = i_cache.institute
        time = i_cache.cpu_time
        jobs = i_cache.no_jobs

        try:
            quota = InstituteQuota.objects.get(
                institute=i, machine_category=machine_category)
            display_quota = quota.quota
        except InstituteQuota.DoesNotExist:
            display_quota = None

        if display_quota is None and time == 0 and jobs == 0:
            continue

        data_row = {
            'institute': i,
            'usage': time,
            'jobs': jobs,
            'quota': display_quota
        }

        if available_time != 0:
            data_row['percent'] = Decimal(time) / Decimal(available_time) * 100
        else:
            data_row['percent'] = 0
        if data_row['quota'] is not None:
            if data_row['quota'] != 0:
                data_row['p_used'] = (data_row['percent'] /
                                      data_row['quota']) * 100
            else:
                data_row['p_used'] = None
            data_row['diff'] = data_row['percent'] - data_row['quota']
            if data_row['diff'] <= 0:
                data_row['class'] = 'green'
            else:
                data_row['class'] = 'red'
        else:
            data_row['class'] = 'green'
            data_row['diff'] = None
            data_row['p_used'] = None

        i_list.append(data_row)

    # Unused Entry
    unused = {'usage': available_time - total, 'quota': 0}
    if available_time != 0:
        unused['percent'] = (unused['usage'] / available_time) * 100
    else:
        unused['percent'] = 0
    unused['diff'] = unused['percent'] - unused['quota'] / 100
    if unused['diff'] <= 0:
        unused['class'] = 'green'
    else:
        unused['class'] = 'red'

    if available_time != 0:
        utilization = (Decimal(total) / available_time) * 100
    else:
        utilization = 0

    institutes_graph = graphs.get_institute_graph_url(
        start, end, machine_category)
    machines_graph = graphs.get_machine_graph_url(
        start, end, machine_category)
    trend_graph = graphs.get_trend_graph_url(
        start, end, machine_category)

    return render(
        template_name='kgusage/usage_institute_list.html',
        context=locals(),
        request=request)
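
Decimal(time) / Decimal(available_time) * 100 works because Decimal mixes freely with ints but refuses floats in arithmetic: multiplying by the int 100 is fine, while a float operand raises TypeError. That is why both sides of the division are wrapped first:

from decimal import Decimal

time, available = 3600, 86400
print(Decimal(time) / Decimal(available) * 100)   # the Decimal percentage
try:
    Decimal(time) * 1.5
except TypeError as err:
    print('floats do not mix:', err)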

Example 119

Project: plyer Source File: __init__.py
    def _get_ticks(self, major, minor, log, s_min, s_max):
        if major and s_max > s_min:
            if log:
                s_min = log10(s_min)
                s_max = log10(s_max)
                # count the decades in min - max. This is in actual decades,
                # not logs.
                n_decades = floor(s_max - s_min)
                # for the fractional part of the last decade, we need to
                # convert the log value, x, to 10**x, but handle it
                # differently if the last incomplete decade has a decade
                # boundary in it
                if floor(s_min + n_decades) != floor(s_max):
                    n_decades += 1 - (10 ** (s_min + n_decades + 1) - 10 **
                                      s_max) / 10 ** floor(s_max + 1)
                else:
                    n_decades += ((10 ** s_max - 10 ** (s_min + n_decades)) /
                                  10 ** floor(s_max + 1))
                # this might be larger than what is needed, but we delete
                # excess later
                n_ticks_major = n_decades / float(major)
                n_ticks = int(floor(n_ticks_major * (minor if minor >=
                                                     1. else 1.0))) + 2
                # in decade multiples, e.g. 0.1 of the decade, the distance
                # between ticks
                decade_dist = major / float(minor if minor else 1.0)

                points_minor = [0] * n_ticks
                points_major = [0] * n_ticks
                k = 0  # position in points major
                k2 = 0  # position in points minor
                # because each decade is missing 0.1 of the decade, skip any
                # tick that falls below min_pos
                min_pos = 0.1 - 0.00001 * decade_dist
                s_min_low = floor(s_min)
                # first real tick location; the value is in fractions of
                # decades from the start. We have to use Decimal here,
                # otherwise floating point inaccuracies result in bad values
                start_dec = ceil((10 ** Decimal(s_min - s_min_low - 1)) /
                                 Decimal(decade_dist)) * decade_dist
                count_min = (0 if not minor else
                             floor(start_dec / decade_dist) % minor)
                start_dec += s_min_low
                count = 0  # number of ticks we currently have passed start
                while True:
                    # this is the current position within the decade we are
                    # in, e.g. -0.9 means that we're at 0.1 of the
                    # 10**ceil(-0.9) decade
                    pos_dec = start_dec + decade_dist * count
                    pos_dec_low = floor(pos_dec)
                    diff = pos_dec - pos_dec_low
                    zero = abs(diff) < 0.001 * decade_dist
                    if zero:
                        # the same value as pos_dec but in log scale
                        pos_log = pos_dec_low
                    else:
                        pos_log = log10((pos_dec - pos_dec_low
                                         ) * 10 ** ceil(pos_dec))
                    if pos_log > s_max:
                        break
                    count += 1
                    if zero or diff >= min_pos:
                        if minor and not count_min % minor:
                            points_major[k] = pos_log
                            k += 1
                        else:
                            points_minor[k2] = pos_log
                            k2 += 1
                    count_min += 1
                #n_ticks = len(points)
            else:
                # distance between each tick
                tick_dist = major / float(minor if minor else 1.0)
                n_ticks = int(floor((s_max - s_min) / tick_dist) + 1)
                points_major = [0] * int(floor((s_max - s_min) / float(major))
                                         + 1)
                points_minor = [0] * (n_ticks - len(points_major) + 1)
                k = 0  # position in points major
                k2 = 0  # position in points minor
                for m in xrange(0, n_ticks):
                    if minor and m % minor:
                        points_minor[k2] = m * tick_dist + s_min
                        k2 += 1
                    else:
                        points_major[k] = m * tick_dist + s_min
                        k += 1
            del points_major[k:]
            del points_minor[k2:]
        else:
            points_major = []
            points_minor = []
        return points_major, points_minor
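
The comment in the log branch marks the one place Decimal appears: the first tick position comes out of a ceil() over a quotient, and one ulp of binary float error is enough to push ceil() to the wrong integer. The classic illustration:

from decimal import Decimal
from math import ceil

# Binary floats overshoot: (0.1 + 0.2) / 0.1 evaluates to 3.0000000000000004,
# so ceil() lands on 4 -- one tick too many.
print(ceil((0.1 + 0.2) / 0.1))                                     # 4
# The same arithmetic in Decimal is exact, and ceil() gets 3.
print(ceil((Decimal('0.1') + Decimal('0.2')) / Decimal('0.1')))    # 3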

Example 120

Project: btcx Source File: mtgox.py
    def _handle_result(self, result, req_id, **kwargs):
        if req_id in self.pending_scall:
            query = self.pending_scall.pop(req_id)
            start = 1 if query['call'].startswith('/') else 0
            name = query['call'][start:]
        else:
            # Result from HTTP API
            name = req_id
            if result['result'] != 'success':
                # Call failed.
                result['params'] = kwargs
                result['params'].update(url=name)
                self._handle_remark(result)
                return
            result = result['data'] if 'data' in result else result['return']

        if name == 'idkey':
            self.evt.emit(name, result)
        elif name == 'orders':
            for order in result:
                self.evt.emit('userorder', self._extract_order(order))
        elif name == 'info':
            # Result from the info method.
            trade_fee = Decimal(str(result['Trade_Fee']))
            rights = result['Rights']
            self.evt.emit(name, (trade_fee, rights))
        elif name == 'wallet/history':
            # Result from the wallet_history method.
            self.evt.emit('wallet_history', result)
        elif name == 'order/add':
            # Result for the order_add method.
            self.evt.emit('order_added', (result, req_id))
        elif name == 'order/cancel':
            # Result for the order_cancel method.
            # Note: result['qid'] is being ignored for now.
            self.evt.emit('order_canceled', (result['oid'], req_id))

        elif name.endswith('/trades/fetch'):
            # Result from the load_trades_since method.
            for trade in result or []:
                trade = self._extract_trade(trade)
                if trade.price is None:
                    continue
                self.evt.emit('trade_fetch', trade)
            # Indicate end of fetch.
            self.evt.emit('trade_fetch', common.TRADE_EMPTY)
        elif name.endswith('/depth/fetch') or name.endswith('/depth/full'):
            # Result from depth_fetch or depth_full method.
            factor = currency_factor(self.currency)
            coin = CURRENCY_FACTOR['BTC'] # XXX
            for typ in ('bid', 'ask'):
                entry = '%ss' % typ
                for order in result[entry]:
                    price = Decimal(order['price_int']) / factor
                    amount = Decimal(order['amount_int']) / coin
                    depth = common.Depth(typ[0], price, amount)
                    self.evt.emit('depth_fetch', depth)
            # Indicate end of fetch.
            self.evt.emit('depth_fetch', common.DEPTH_EMPTY)
        elif name.endswith('/ticker'):
            self.evt.emit('ticker_fetch',
                    self._extract_ticker(result, restrict_currency=False))
        elif name.endswith('/currency'):
            self.evt.emit('currency_info', result)

        else:
            rtype = name.lstrip('/').replace('/', '_')
            if rtype[0] in ('1', '2') and rtype[1] == '_':
                # Assuming this is the result of an HTTP API call
                # and the version used is not interesting.
                rtype = rtype[2:]
            log.msg("Emitting result event for %s" % rtype)
            self.evt.emit('result', (rtype, result))
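
Two conversion idioms appear here. Decimal(str(result['Trade_Fee'])) goes through str() because the value may arrive as a float, and the string form keeps the short human-readable value rather than the full binary expansion; Decimal(order['price_int']) / factor decodes integer-scaled prices. Both in miniature (the 10**5 scale factor below is illustrative):

from decimal import Decimal

fee = 0.6                           # e.g. as decoded from JSON
print(Decimal(str(fee)))            # 0.6
print(Decimal(fee))                 # 0.59999999999999997779553950749686919152736663818359375

price_int = 1234567                 # hypothetical price scaled by 10**5
factor = Decimal(10) ** 5
print(Decimal(price_int) / factor)  # 12.34567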

Example 121

Project: weboob Source File: pages.py
    def iter_accounts(self, next_pages):
        account_type = Account.TYPE_UNKNOWN

        params = self.get_params()
        actions = self.get_button_actions()

        for div in self.docuement.getroot().cssselect('div.btit'):
            if div.text in (None, u'Synthèse'):
                continue
            account_type = self.ACCOUNT_TYPES.get(div.text.strip(), Account.TYPE_UNKNOWN)

            if account_type is None:
                # ignore service accounts
                self.logger.debug('Ignore account type %s', div.text.strip())
                continue

            # Go to the full list of this kind of account, if any.
            btn = div.getparent().xpath('.//button/span[text()="Suite"]')
            if len(btn) > 0:
                btn = btn[0].getparent()
                _params = params.copy()
                _params.update(actions[btn.attrib['id']])
                next_pages.append(_params)
                continue

            currency = None
            for th in div.getnext().xpath('.//thead//th'):
                m = re.match('.*\((\w+)\)$', th.text)
                if m and currency is None:
                    currency = Account.get_currency(m.group(1))

            for tr in div.getnext().xpath('.//tbody/tr'):
                if 'id' not in tr.attrib:
                    continue

                args = dict(parse_qsl(tr.attrib['id']))
                tds = tr.findall('td')

                if len(tds) < 4 or 'identifiant' not in args:
                    self.logger.warning('Unable to parse an account')
                    continue

                account = Account()
                account.id = args['identifiant'].replace(' ', '')
                account.label = u' '.join([u''.join([txt.strip() for txt in tds[1].itertext()]),
                                           u''.join([txt.strip() for txt in tds[2].itertext()])]).strip()

                for pattern, _type in self.PATTERN:
                    match = pattern.match(account.label)
                    if match:
                        account.type = _type
                        break
                    else:
                        account.type = account_type

                balance = FrenchTransaction.clean_amount(u''.join([txt.strip() for txt in tds[3].itertext()]))
                account.balance = Decimal(balance or '0.0')
                account.currency = currency
                if account.type == account.TYPE_LOAN:
                    account.balance = - abs(account.balance)

                account._prev_debit = None
                account._next_debit = None
                account._params = None
                account._coming_params = None
                account._invest_params = None
                if balance != u'' and len(tds[3].xpath('.//a')) > 0:
                    account._params = params.copy()
                    account._params['dialogActionPerformed'] = 'SOLDE'
                    account._params['attribute($SEL_$%s)' % tr.attrib['id'].split('_')[0]] = tr.attrib['id'].split('_', 1)[1]

                if len(tds) >= 5 and len(tds[self.COL_COMING].xpath('.//a')) > 0:
                    _params = account._params.copy()
                    _params['dialogActionPerformed'] = 'ENCOURS_COMPTE'

                    # If there is an action needed before going to the cards page, save it.
                    m = re.search('dialogActionPerformed=([\w_]+)', self.url)
                    if m and m.group(1) != 'EQUIPEMENT_COMPLET':
                        _params['prevAction'] = m.group(1)
                    next_pages.append(_params)

                if not account._params:
                    account._invest_params = params.copy()
                    account._invest_params['dialogActionPerformed'] = 'CONTRAT'
                    account._invest_params['attribute($SEL_$%s)' % tr.attrib['id'].split('_')[0]] = tr.attrib['id'].split('_', 1)[1]

                yield account

        # Needed to preserve navigation.
        btn = self.docuement.xpath('.//button/span[text()="Retour"]')
        if len(btn) > 0:
            btn = btn[0].getparent()
            _params = params.copy()
            _params.update(actions[btn.attrib['id']])
            self.browser.openurl('/cyber/internet/ContinueTask.do', urllib.urlencode(_params))
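
account.balance = Decimal(balance or '0.0') guards against the cleaned amount being an empty string, since Decimal('') raises InvalidOperation. The guard in isolation:

from decimal import Decimal, InvalidOperation

def to_balance(cleaned_amount):
    # Fall back to '0.0' when the page yielded an empty string.
    return Decimal(cleaned_amount or '0.0')

print(to_balance('-123.45'))   # -123.45
print(to_balance(''))          # 0.0
try:
    Decimal('')
except InvalidOperation:
    print('the fallback is what keeps this from raising')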

Example 122

Project: weboob Source File: history.py
Function: parse_page
    def _parse_page(self, page):

        # Regexp
        footnote = re.compile(r'\([0-9]\) ')                # (f)
        ht = re.compile('HT par mois')
        base = re.compile('la base de')
        enddate = re.compile('\d\d\/\d\d\/\d\d')            # DD/MM/YY
        endwithdigit = re.compile('\d+$')                   # blah blah 42
        textwithcoma = re.compile('([a-z]|\d{4})\,')        # blah 2012, blah blah

        # Parsing
        details = []
        for title in ['Abonnement',
                      'Consommation',
                      'Contributions et taxes liées à l\'énergie']:
            section = page.split(title, 1)[1].split('Total ')[0]

            # When a line holds '(0)', a newline is missing.
            section = re.sub(footnote, '\n', section)

            lines = section.split('\n')
            lines = [x for x in lines if len(x) > 0]  # Remove empty lines
            detail = None

            for line in lines:
                if re.match('[A-Za-z]', line[0]):

                    # Things we want to merge with the one just before
                    if 'facturées' in line:
                        # Long lines are sometimes split, so we try to join them
                        # That is the case for:
                        # 'Déduction du montant des consommations
                        # estimées facturées du 00/00/00 au 00/00/00'
                        detail.label = detail.label + u' ' + unicode(line, encoding='utf-8')

                    # Things for which we want a new detail
                    else:
                        # Entering here, we will instantiate a new detail.
                        # We hadn't done so before because of fragmented lines.
                        if detail is not None and detail.label is not NotAvailable:
                            # We have a new element, return the other one
                            details.append(detail)
                        detail = Detail()
                        detail.price = Decimal(0)

                        # If the comma is not a decimal separator, then
                        # this is probably a loooong sentence.
                        # When it comes to jokes, keep it short and sweet.
                        line = re.split(textwithcoma, line)[0]

                        # Things we want for sure
                        if re.findall(enddate, line):
                            # When a line has been badly split after a date,
                            # We want the label to end after the date, and maybe
                            # the second part to be the info
                            mydate = re.search(enddate, line).group(0)
                            mylist = line.rpartition(mydate)
                            label = mylist[0] + mylist[1]
                            detail.label = unicode(label, encoding='utf-8')
                        elif re.findall(endwithdigit, line):
                            # What is this stupid number at the end of the line?
                            # Line should have been split before the number
                            detail.label = unicode(re.split(endwithdigit, line)[0], encoding='utf-8')
                        # Things we don't want for sure
                        elif ')' in line and '(' not in line:
                            # First part of the parenthesis should have been dropped before
                            # Avoid to create a new empty detail
                            detail.label = NotAvailable
                        elif re.match(base, line):
                            # This string should always come after a date;
                            # usually it will match one of the cases above.
                            # Sometimes it appears on a new line we don't need.
                            detail.label = NotAvailable
                        elif re.match(ht, line):
                            # '00,00 € HT par mois' may have been split after HT
                            # We don't need the second line
                            detail.label = NotAvailable
                        # Things we probably want to keep
                        else:
                            # Well, maybe our line is correct, after all.
                            # Not much to do.
                            detail.label = unicode(line, encoding='utf-8')
                        detail.infos = NotAvailable
                elif ' %' in line:
                    if isinstance(detail, Detail):
                        # Sometimes the vat is not on a new line:
                        # '00,00 00,0 %' instead of '00,0 %'
                        vat = line.split()[line.count(' ')-1].replace(',', '.')
                        detail.infos = unicode('TVA: ' + vat)
                elif ' €' in line:
                    price = line.replace(',', '.')
                    if isinstance(detail, Detail):
                        detail.price = Decimal(price.strip(' €'))
                elif re.match(enddate, line):
                    # Line holding dates may have been mixed up
                    label = detail.label.split(' au ')[0] + u' au ' + unicode(line, encoding='utf-8')
                    detail.label = label
            if detail.label is not NotAvailable:
                # Do not append empty details to the list.
                # It seemed easier to create details anyway than to deal
                # with None objects.
                details.append(detail)
        return details
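
The price handling above normalizes French amounts before construction: Decimal only accepts a dot as the decimal separator, so '12,34 €' has its comma swapped and the currency sign stripped first. Reduced to the essentials:

# -*- coding: utf-8 -*-
from decimal import Decimal

line = u'12,34 €'
# strip() removes any leading/trailing spaces and euro signs.
price = Decimal(line.replace(',', '.').strip(u' €'))
print(price)   # 12.34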

Example 123

Project: openrtb Source File: mobile.py
Function: init
    def __init__(self, brq):
        self.mobile_brq = brq
        params = {
            'id': brq.id,
            'imp': [
                request.Impression(
                    id=imp.impid,
                    banner=request.Banner(
                        w=imp.w,
                        h=imp.h,
                        pos=imp.pos,
                        battr=imp.battr,
                        btype=imp.btype,
                    ),
                    bidfloor=Decimal(0),
                ) for imp in brq.imp
            ],
            'at': brq.at,
            'tmax': brq.tmax
        }
        if brq.site:
            params['site'] = request.Site(
                id=brq.site.sid,
                name=brq.site.name,
                domain=brq.site.domain,
                publisher=request.Publisher(
                    id=brq.site.pid,
                    name=brq.site.pub,
                    domain=brq.site.pdomain
                ),
                cat=brq.site.cat,
                keywords=brq.site.keywords,
                page=brq.site.page,
                ref=brq.site.ref,
                search=brq.site.search,
            )
        if brq.app:
            params['app'] = request.App(
                id=brq.app.aid,
                name=brq.app.name,
                domain=brq.app.domain,
                publisher=request.Publisher(
                    id=brq.app.pid,
                    name=brq.app.pub,
                    pdomain=brq.app.pdomain,
                ),
                cat=brq.app.cat,
                keywords=brq.app.keywords,
                ver=brq.app.ver,
                bundle=brq.app.bundle,
                paid=brq.app.paid,
            )
        if brq.device:
            lat = lon = None
            if brq.device.loc:
                lat, lon = brq.device.loc.split(',')
                lat, lon = float(lat), float(lon)
            params['device'] = request.Device(
                didsha1=brq.device.did,
                dpidsha1=brq.device.dpid,
                ip=brq.device.ip,
                geo=request.Geo(
                    lat=lat,
                    lon=lon,
                    country=brq.device.country,
                ),
                carrier=brq.device.carrier,
                ua=brq.device.ua,
                make=brq.device.make,
                model=brq.device.model,
                os=brq.device.os,
                osv=brq.device.osv,
                js=brq.device.js,
                connectiontype=constants.ConnectionType.CELLULAR_UNKNOWN_G,
                devicetype=constants.DeviceType.MOBILE
            )
        if brq.user:
            params['user'] = request.User(
                id=brq.user.uid,
                yob=brq.user.yob,
                gender=brq.user.gender,
                keywords=brq.user.keywords,
                geo=request.Geo(
                    country=brq.user.country,
                    zip=brq.user.zip
                )
            )
        if brq.restrictions:
            params['badv'] = brq.restrictions.badv
            params['bcat'] = brq.restrictions.bcat

        self.brq = request.BidRequest(**params)
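
`bidfloor=Decimal(0)` constructs the floor from an int, which Decimal converts exactly; int operands also mix freely with Decimal arithmetic. A quick illustration:

    from decimal import Decimal

    floor = Decimal(0)
    print(floor)           # 0 -- int construction is always exact
    print(floor + 1)       # 1 -- ints combine with Decimal without conversion
    print(Decimal(3) / 7)  # 0.4285714285714285714285714286 (context precision)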

Example 124

Project: libreshop Source File: models.py
    @property
    def cost_of_goods_sold(self):
        '''
        The Cost of Goods Sold (COGS) of the Purchase.
        '''
        from fulfillment.models import FulfillmentPurchase
        from inventory.models import Supply

        cost = Decimal(0.00)

        # Determine whether or not this is a drop-shipped Purchase.
        if self.variant.fulfillment_settings:
            try:
                # Try to get the total cost of a backend drop-shipment Purchase.
                cost = self.fulfillment_purchase.total
                logger.debug('Purchase has been submitted for fulfillment.')
            except FulfillmentPurchase.DoesNotExist as e:
                logger.debug('Purchase has not been submitted for fulfillment.')

        else:
            # Determine the amount of each respective raw material in Inventory
            # that is consumed to produce this Purchase.

            inventory_consumed = {
                component.inventory:component.quantity
                for component in self.variant.components.all()}
            logger.debug(
                'Inventory consumed by this Purchase: %s' % inventory_consumed)

            # Determine the amount of all raw materials taken from Inventory
            # prior to this Purchase.

            prior_inventory_consumed = [
                (component.inventory, component.quantity)
                for purchases
                    in Purchase.objects.filter(created__lt=self.created)
                for component in purchases.variant.components.filter(
                    inventory__in=inventory_consumed)]
            prior_inventory_aggregate = {
                inventory:sum(quantity[1] for quantity in quantities)
                for (inventory, quantities)
                in groupby(sorted(prior_inventory_consumed), lambda x: x[0])}
            logger.debug(
                'Inventory consumed before this Purchase: %s' %
                prior_inventory_aggregate)

            # Determine the range of Inventory consumed by this Purchase
            # with respect to incoming Supply.

            inventory_consumed_ranges = {
                inventory:(
                    prior_inventory_aggregate.get(inventory, Decimal(0.00)),
                    prior_inventory_aggregate.get(inventory, Decimal(0.00)) +
                    value)
                for (inventory, value) in inventory_consumed.items()}

            logger.debug(
                'Inventory ranges consumed by this Purchase: %s' %
                inventory_consumed_ranges)

            # Calculate the cost of goods sold for this Purchase.
            for inventory, range_ in inventory_consumed_ranges.items():

                # Create a number line of all Supply received for this
                # particular raw material, along with its associated unit cost.

                supplies = Supply.objects.filter(
                    inventory=inventory, receipt_date__isnull=False)
                price_history = {(
                    sum(supply.units_received for supply in supplies[:i]),
                    sum(supply.units_received for supply in supplies[:i]) +
                    supply.units_received): supply.unit_cost
                    for (i, supply) in enumerate(supplies)}
                price_history = OrderedDict(
                    sorted(price_history.items(), key=lambda x: x[0]))

                logger.debug(
                    'Price history for %s is: %s' % (inventory, price_history))

                # Associate the number line with the range of Supply consumed
                # by this Purchase.

                purchase_start, purchase_end = range_
                for range_, unit_cost in price_history.items():
                    units_from_supply = 0
                    supply_start, supply_end = range_

                    # If the Supply was consumed already, continue searching.
                    if supply_end < purchase_start:
                        continue

                    # The Purchase is manufactured from a single Supply.
                    elif supply_start <= purchase_start < purchase_end <= supply_end:
                        units_from_supply = inventory_consumed[inventory]

                    # The entire Supply is used to manufacture the Purchase.
                    elif purchase_start <= supply_start < supply_end <= purchase_end:
                        units_from_supply = supply_end - supply_start

                    # The Purchase consumes the remainder of a Supply.
                    elif supply_start <= purchase_start <= supply_end:
                        units_from_supply = supply_end - purchase_start

                    # The Purchase consumes part of a Supply
                    elif supply_start <= purchase_end <= supply_end:
                        units_from_supply = purchase_end - supply_start

                    # Undefined behavior.
                    else:
                        logger.debug((
                            'Undefined scenario occurred for Supply(%s,%s] '
                            'Purchase(%s,%s]') % (
                                supply_start, supply_end,
                                purchase_start, purchase_end))

                    # Add the Supply unit cost to the cost of the Purchase.
                    cost += units_from_supply * unit_cost

                    # Stop calculation if all Inventory is accounted for.
                    inventory_consumed[inventory] -= units_from_supply
                    if inventory_consumed[inventory] <= 0:
                        break

        logger.info(
            'Cost of the "%s" Purchase under Order "%s" is %s.' %
            (self.variant.name, self.order.token, cost))

        return cost
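
One caveat worth flagging here: `Decimal(0.00)` is only safe because 0.0 happens to be exactly representable in binary; constructing from most other floats imports the float's representation error. Strings (or plain ints) are the safer habit, e.g.:

    from decimal import Decimal

    print(Decimal(0.00))   # 0 -- exact, but only because 0.0 has no binary error
    print(Decimal(0.1))    # 0.1000000000000000055511151231257827021181583404541015625
    print(Decimal('0.1'))  # 0.1 -- string construction is exact by definition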

Example 125

Project: lino Source File: jsgen.py
def py2js(v):
    """Note that None values are rendered as ``null`` (not ``undefined``.

    """
    # assert _for_user_profile is not None
    # logger.debug("py2js(%r)",v)
    for cv in CONVERTERS:
        v = cv(v)

    # if isinstance(v,LanguageInfo):
        # return v.django_code

    if isinstance(v, Value):
        return v.as_ext()
        # v = v.as_ext()
        # if not isinstance(v, basestring):
            # raise Exception("20120121b %r is of type %s" % (v,type(v)))
        # return v
    if isinstance(v, Promise):
        # v = force_text(v)
        return json.dumps(force_text(v.encode('utf8')))

    if isinstance(v, types.GeneratorType):
        return "".join([py2js(x) for x in v])
    if etree.iselement(v):
        return json.dumps(force_text(etree.tostring(v)))

    # if type(v) is types.GeneratorType:
        # raise Exception("Please don't call the generator function yourself")
        # return "\n".join([ln for ln in v])
    if callable(v):
        # print 20120114, repr(v)
        # raise Exception("Please call the function yourself")
        return "\n".join([ln for ln in v()])
    if isinstance(v, js_code):
        return str(v.s)  # v.s might be a unicode
    if v is None:
        # return 'undefined'
        return 'null'
    if isinstance(v, (list, tuple)):  # (types.ListType, types.TupleType):
        elems = [py2js(x) for x in v
                 if (not isinstance(x, VisibleComponent))
                 or x.get_view_permission(_for_user_profile)]
        return "[ %s ]" % ", ".join(elems)

    if isinstance(v, dict):
        # 20160423: removed "sorted(v.items())" because it caused
        # TypeError when the dictionary contained a mixture of unicode
        # and future.types.newstr objects.
        try:
            items = [
                i for i in sorted(v.items())
                if (not isinstance(v, VisibleComponent))
                or v.get_view_permission(_for_user_profile)]
        except TypeError as e:
            raise TypeError("Failed to sort {0} : {1}".format(v, e))
        return "{ %s }" % ", ".join(
            ["%s: %s" % (py2js(k), py2js(i)) for k, i in items])

    if isinstance(v, bool):  # types.BooleanType:
        return str(v).lower()
    if isinstance(v, Quantity):
        return '"%s"' % v
    if isinstance(v, (int, decimal.Decimal, fractions.Fraction)):
        return str(v)
    if isinstance(v, IncompleteDate):
        return '"%s"' % v.strftime(settings.SITE.date_format_strftime)
    if isinstance(v, datetime.datetime):
        return '"%s"' % v.strftime(settings.SITE.datetime_format_strftime)
    if isinstance(v, datetime.time):
        return '"%s"' % v.strftime(settings.SITE.time_format_strftime)
    if isinstance(v, datetime.date):
        if v.year < 1900:
            v = IncompleteDate(v)
            return '"%s"' % v.strftime(settings.SITE.date_format_strftime)
        return '"%s"' % v.strftime(settings.SITE.date_format_strftime)

    if isinstance(v, float):
        return repr(v)
    # return json.encoder.encode_basestring(v)
    # print repr(v)
    # http://docs.djangoproject.com/en/dev/topics/serialization/
    # if not isinstance(v, (str,unicode)):
        # raise Exception("20120121 %r is of type %s" % (v,type(v)))
    return json.dumps(v)
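
`py2js` renders a Decimal with `str(v)`, splicing it into the generated JavaScript as an unquoted numeric literal; plain `json.dumps` would raise a TypeError on Decimal. A sketch of one common workaround, a custom encoder that emits Decimals as strings (the payload is hypothetical):

    import json
    from decimal import Decimal

    class DecimalEncoder(json.JSONEncoder):
        def default(self, o):
            # Serialise Decimals as strings so no precision is lost in transit.
            if isinstance(o, Decimal):
                return str(o)
            return super(DecimalEncoder, self).default(o)

    print(json.dumps({'price': Decimal('19.99')}, cls=DecimalEncoder))
    # {"price": "19.99"}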

Example 126

Project: evething Source File: contracts.py
    def run(self, url, taskstate_id, apikey_id, character_id):
        if self.init(taskstate_id, apikey_id) is False:
            return

        # Make sure the character exists
        try:
            character = Character.objects.select_related('details').get(pk=character_id)
        except Character.DoesNotExist:
            self.log_warn('Character %s does not exist!', character_id)
            return

        now = datetime.datetime.now()

        # Initialise for corporate query
        if self.apikey.key_type == APIKey.CORPORATION_TYPE:
            c_filter = Contract.objects.filter(corporation=self.apikey.corporation)

        # Initialise for character query
        else:
            c_filter = Contract.objects.filter(character=character, corporation__isnull=True)

        params = {'characterID': character_id}
        if self.fetch_api(url, params) is False or self.root is None:
            return

        # Retrieve a list of this user's characters and corporations
        # user_chars = list(Character.objects.filter(apikeys__user=self.apikey.user).values_list('id', flat=True))
        # user_corps = list(APIKey.objects.filter(user=self.apikey.user).exclude(
        #   corpasdasd_character=None).values_list('corpasd_character__corporation__id', flat=True))

        # First we need to get all of the acceptor and assignee IDs
        contract_ids = set()
        station_ids = set()
        lookup_ids = set()
        lookup_corp_ids = set()
        contract_rows = []
        # <row contractID="58108507" issuerID="2004011913" issuerCorpID="751993277" assigneeID="401273477"
        #      acceptorID="0" startStationID="60014917" endStationID="60003760" type="Courier" status="Outstanding"
        #      title="" forCorp="0" availability="Private" dateIssued="2012-08-02 06:50:29"
        #      dateExpired="2012-08-09 06:50:29" dateAccepted="" numDays="7" dateCompleted="" price="0.00"
        #      reward="3000000.00" collateral="0.00" buyout="0.00" volume="10000"/>
        for row in self.root.findall('result/rowset/row'):
            if self.apikey.key_type == APIKey.CORPORATION_TYPE:
                # corp keys don't care about non-corp orders
                if row.attrib['forCorp'] == '0':
                    continue
                # corp keys don't care about orders they didn't issue - another fun
                # bug where corp keys see alliance contracts they didn't make  :ccp:
                if self.apikey.corporation.id not in (
                        int(row.attrib['issuerCorpID']), int(row.attrib['assigneeID']), int(row.attrib['acceptorID'])
                ):
                    # logger.info('Skipping non-corp contract :ccp:')
                    continue

            # non-corp keys don't care about corp orders
            if self.apikey.key_type != APIKey.CORPORATION_TYPE and row.attrib['forCorp'] == '1':
                continue

            contract_ids.add(int(row.attrib['contractID']))

            station_ids.add(int(row.attrib['startStationID']))
            station_ids.add(int(row.attrib['endStationID']))

            lookup_ids.add(int(row.attrib['issuerID']))
            lookup_corp_ids.add(int(row.attrib['issuerCorpID']))

            if row.attrib['assigneeID'] != '0':
                lookup_ids.add(int(row.attrib['assigneeID']))
            if row.attrib['acceptorID'] != '0':
                lookup_ids.add(int(row.attrib['acceptorID']))

            contract_rows.append(row)

        # Fetch bulk data
        char_map = Character.objects.in_bulk(lookup_ids)
        corp_map = Corporation.objects.in_bulk(lookup_ids | lookup_corp_ids)
        alliance_map = Alliance.objects.in_bulk(lookup_ids)
        station_map = Station.objects.in_bulk(station_ids)

        # Add missing IDs as *UNKNOWN* Characters for now
        new = []
        for new_id in lookup_ids.difference(char_map, corp_map, alliance_map, lookup_corp_ids):
            char = Character(
                id=new_id,
                name="*UNKNOWN*",
            )
            new.append(char)
            char_map[new_id] = char

        if new:
            Character.objects.bulk_create(new)

        # Add missing Corporations too
        new = []
        for new_id in lookup_corp_ids.difference(corp_map):
            corp = Corporation(
                id=new_id,
                name="*UNKNOWN*",
            )
            new.append(corp)
            corp_map[new_id] = corp

        if new:
            Corporation.objects.bulk_create(new)

        # Fetch station data

        # Fetch all existing contracts
        c_map = {}
        for contract in c_filter.filter(contract_id__in=contract_ids):
            c_map[contract.contract_id] = contract

        # Finally, after all of that other bullshit, we can actually deal with
        # our goddamn contract rows
        new_contracts = []
        new_events = []

        # <row contractID="58108507" issuerID="2004011913" issuerCorpID="751993277" assigneeID="401273477"
        #      acceptorID="0" startStationID="60014917" endStationID="60003760" type="Courier" status="Outstanding"
        #      title="" forCorp="0" availability="Private" dateIssued="2012-08-02 06:50:29" dateExpired="2012-08-09 06:50:29"
        #      dateAccepted="" numDays="7" dateCompleted="" price="0.00" reward="3000000.00" collateral="0.00" buyout="0.00"
        #      volume="10000"/>
        for row in contract_rows:
            contractID = int(row.attrib['contractID'])

            issuer_char = char_map.get(int(row.attrib['issuerID']))
            if issuer_char is None:
                self.log_warn('Invalid issuerID %s', row.attrib['issuerID'])
                continue

            issuer_corp = corp_map.get(int(row.attrib['issuerCorpID']))
            if issuer_corp is None:
                self.log_warn('Invalid issuerCorpID %s', row.attrib['issuerCorpID'])
                continue

            start_station = station_map.get(int(row.attrib['startStationID']))
            if start_station is None:
                self.log_warn('Invalid startStationID %s', row.attrib['startStationID'])
                continue

            end_station = station_map.get(int(row.attrib['endStationID']))
            if end_station is None:
                self.log_warn('Invalid endStationID %s', row.attrib['endStationID'])
                continue

            assigneeID = int(row.attrib['assigneeID'])
            acceptorID = int(row.attrib['acceptorID'])

            dateIssued = self.parse_api_date(row.attrib['dateIssued'])
            dateExpired = self.parse_api_date(row.attrib['dateExpired'])

            dateAccepted = row.attrib['dateAccepted']
            if dateAccepted:
                dateAccepted = self.parse_api_date(dateAccepted)
            else:
                dateAccepted = None

            dateCompleted = row.attrib['dateCompleted']
            if dateCompleted:
                dateCompleted = self.parse_api_date(dateCompleted)
            else:
                dateCompleted = None

            type = row.attrib['type']
            if type == 'ItemExchange':
                type = 'Item Exchange'

            contract = c_map.get(contractID, None)
            # Contract exists, maybe update stuff
            if contract is not None:
                if contract.status != row.attrib['status']:
                    text = "Contract %s changed status from '%s' to '%s'" % (
                        contract, contract.status, row.attrib['status'])

                    new_events.append(Event(
                        user_id=self.apikey.user.id,
                        issued=now,
                        text=text,
                    ))

                    contract.status = row.attrib['status']
                    contract.date_accepted = dateAccepted
                    contract.date_completed = dateCompleted
                    contract.acceptor_id = acceptorID
                    contract.save()

            # Contract does not exist, make a new one
            else:
                contract = Contract(
                    character=character,
                    contract_id=contractID,
                    issuer_char=issuer_char,
                    issuer_corp=issuer_corp,
                    assignee_id=assigneeID,
                    acceptor_id=acceptorID,
                    start_station=station_map[int(row.attrib['startStationID'])],
                    end_station=station_map[int(row.attrib['endStationID'])],
                    type=type,
                    status=row.attrib['status'],
                    title=row.attrib['title'],
                    for_corp=(row.attrib['forCorp'] == '1'),
                    public=(row.attrib['availability'].lower() == 'public'),
                    date_issued=dateIssued,
                    date_expired=dateExpired,
                    date_accepted=dateAccepted,
                    date_completed=dateCompleted,
                    num_days=int(row.attrib['numDays']),
                    price=Decimal(row.attrib['price']),
                    reward=Decimal(row.attrib['reward']),
                    collateral=Decimal(row.attrib['collateral']),
                    buyout=Decimal(row.attrib['buyout']),
                    volume=Decimal(row.attrib['volume']),
                )
                if self.apikey.key_type == APIKey.CORPORATION_TYPE:
                    contract.corporation = self.apikey.corporation

                new_contracts.append(contract)

                # If this contract is a new contract in a non-completed state, log an event
                if contract.status in ('Outstanding', 'InProgress'):
                    # if assigneeID in user_chars or assigneeID in user_corps:
                    assignee = char_map.get(assigneeID, corp_map.get(assigneeID, alliance_map.get(assigneeID)))
                    if assignee is not None:
                        text = "Contract %s was created from '%s' to '%s' with status '%s'" % (
                            contract, contract.get_issuer_name(), assignee.name, contract.status)

                        new_events.append(Event(
                            user_id=self.apikey.user.id,
                            issued=now,
                            text=text,
                        ))

        # And save the damn things
        Contract.objects.bulk_create(new_contracts)
        Event.objects.bulk_create(new_events)

        # Force the queryset to update
        # c_filter.update()

        # # Now go fetch items for each contract
        # items_url = url.replace('Contracts', 'ContractItems')
        # new = []
        # seen_contracts = []
        # # Apparently courier contracts don't have ContractItems support? :ccp:
        # for contract in c_filter.filter(retrieved_items=False).exclude(type='Courier'):
        #     params['contractID'] = contract.contract_id
        #     if self.fetch_api(items_url, params) is False or self.root is None:
        #         continue

        #     for row in self.root.findall('result/rowset/row'):
        #         new.append(ContractItem(
        #             contract_id=contract.contract_id,
        #             item_id=row.attrib['typeID'],
        #             quantity=row.attrib['quantity'],
        #             raw_quantity=row.attrib.get('rawQuantity', 0),
        #             singleton=row.attrib['singleton'] == '1',
        #             included=row.attrib['included'] == '1',
        #         ))

        #     seen_contracts.append(contract.contract_id)

        # if new:
        #     ContractItem.objects.bulk_create(new)
        #     c_filter.filter(contract_id__in=seen_contracts).update(retrieved_items=True)

        return True
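
Note that `price`, `reward`, `collateral`, `buyout` and `volume` are all built straight from the XML attribute strings, which is the ideal input for Decimal: the API's '3000000.00' is preserved digit for digit, with no intermediate float. For instance (attribute values hypothetical):

    from decimal import Decimal

    attrib = {'price': '0.00', 'reward': '3000000.00'}

    reward = Decimal(attrib['reward'])
    print(reward)         # 3000000.00 -- trailing zeros kept
    print(float(reward))  # 3000000.0  -- a float round-trip would drop them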

Example 127

Project: evething Source File: marketorders.py
    def run(self, url, taskstate_id, apikey_id, character_id):
        if self.init(taskstate_id, apikey_id) is False:
            return

        # Make sure the character exists
        try:
            character = Character.objects.select_related('details').get(pk=character_id)
        except Character.DoesNotExist:
            self.log_warn('Character %s does not exist!', character_id)
            return

        # Initialise for corporate key
        if self.apikey.key_type == APIKey.CORPORATION_TYPE:
            mo_filter = MarketOrder.objects.filter(corp_wallet__corporation=character.corporation)

            wallet_map = {}
            for cw in CorpWallet.objects.filter(corporation=character.corporation):
                wallet_map[cw.account_key] = cw

        # Initialise for other keys
        else:
            mo_filter = MarketOrder.objects.filter(corp_wallet=None, character=character)

        mo_filter = mo_filter.select_related('item')

        # Fetch the API data
        params = {'characterID': character_id}
        if self.fetch_api(url, params) is False or self.root is None:
            return

        # Generate an order_id map
        order_map = {}
        for mo in mo_filter:
            order_map[mo.order_id] = mo

        # Iterate over the returned result set
        char_ids = set()
        item_ids = set()
        station_ids = set()

        rows = []
        seen = []
        for row in self.root.findall('result/rowset/row'):
            order_id = int(row.attrib['orderID'])

            # Order exists
            order = order_map.get(order_id)
            if order is not None:
                # Order is still active, update relevant details
                if row.attrib['orderState'] == '0':
                    issued = self.parse_api_date(row.attrib['issued'])
                    volRemaining = int(row.attrib['volRemaining'])
                    escrow = Decimal(row.attrib['escrow'])
                    price = Decimal(row.attrib['price'])

                    if issued > order.issued or volRemaining != order.volume_remaining or \
                       escrow != order.escrow or price != order.price:
                        order.issued = issued
                        order.expires = issued + datetime.timedelta(int(row.attrib['duration']))
                        order.volume_remaining = volRemaining
                        order.escrow = escrow
                        order.price = price
                        order.total_price = order.volume_remaining * order.price
                        order.save()

                    seen.append(order_id)

            # Doesn't exist and is active, save data for later
            elif row.attrib['orderState'] == '0':
                char_ids.add(int(row.attrib['charID']))
                item_ids.add(int(row.attrib['typeID']))
                station_ids.add(int(row.attrib['stationID']))

                rows.append(row)
                seen.append(order_id)

        # Bulk query data
        # char_map = Character.objects.in_bulk(char_ids)
        item_map = Item.objects.in_bulk(item_ids)
        station_map = Station.objects.in_bulk(station_ids)

        # Create new MarketOrder objects
        new = []
        for row in rows:
            item = item_map.get(int(row.attrib['typeID']))
            if item is None:
                self.log_warn("No matching Item %s", row.attrib['typeID'])
                continue

            station = station_map.get(int(row.attrib['stationID']))
            if station is None:
                self.log_warn("No matching Station %s", row.attrib['stationID'])
                continue

            # Create the new order object
            buy_order = (row.attrib['bid'] == '1')
            remaining = int(row.attrib['volRemaining'])
            price = Decimal(row.attrib['price'])
            issued = self.parse_api_date(row.attrib['issued'])

            order = MarketOrder(
                order_id=row.attrib['orderID'],
                station=station,
                item=item,
                character=character,
                escrow=Decimal(row.attrib['escrow']),
                creator_character_id=row.attrib['charID'],
                price=price,
                total_price=remaining * price,
                buy_order=buy_order,
                volume_entered=int(row.attrib['volEntered']),
                volume_remaining=remaining,
                minimum_volume=int(row.attrib['minVolume']),
                issued=issued,
                expires=issued + datetime.timedelta(int(row.attrib['duration'])),
            )
            # Set the corp_wallet for corporation API requests
            if self.apikey.key_type == APIKey.CORPORATION_TYPE:
                order.corp_wallet = wallet_map.get(int(row.attrib['accountKey']))

            new.append(order)

        # Insert any new orders
        if new:
            MarketOrder.objects.bulk_create(new)

        # Any orders we didn't see need to be deleted - issue events first
        now = datetime.datetime.now()
        to_delete = mo_filter.exclude(pk__in=seen)
        new_events = []
        for order in to_delete.select_related():
            if order.buy_order:
                buy_sell = 'buy'
            else:
                buy_sell = 'sell'

            if order.corp_wallet:
                order_type = 'corporate'
            else:
                order_type = 'personal'

            url = '%s?ft=item&fc=eq&fv=%s' % (reverse('thing.views.transactions'), order.item.name)
            text = '%s: %s %s order for <a href="%s">%s</a> completed/expired' % (
                order.station.short_name,
                order_type,
                buy_sell,
                url,
                order.item.name,
            )
            if order.corp_wallet:
                text = '%s ([%s] %s)' % (text, order.corp_wallet.corporation.ticker, order.corp_wallet.description)
            else:
                text = '%s (%s)' % (text, order.character.name)

            new_events.append(Event(
                user_id=self.apikey.user.id,
                issued=now,
                text=text,
            ))

        # Bulk create new events
        Event.objects.bulk_create(new_events)

        # Then delete
        to_delete.delete()

        return True
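
The change check `escrow != order.escrow or price != order.price` relies on Decimal comparison being numeric rather than textual, so a re-fetched '100.0' still equals a stored '100.00':

    from decimal import Decimal

    print(Decimal('100.0') == Decimal('100.00'))  # True -- compared by value
    print(Decimal('100.0') == 100)                # True -- ints compare too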

Example 128

Project: evething Source File: wallettransactions.py
    def _work(self, url, character, corp_wallet=None):
        # Initialise stuff
        params = {
            'characterID': character.id,
            'rowCount': TRANSACTION_ROWS,
        }

        # Corporation key
        if self.apikey.key_type == APIKey.CORPORATION_TYPE:
            params['accountKey'] = corp_wallet.account_key
            t_filter = Transaction.objects.filter(corp_wallet=corp_wallet)
        # Account/Character key
        else:
            t_filter = Transaction.objects.filter(corp_wallet=None, character=character)

        # Stuff to collect
        bulk_data = {}
        char_ids = set()
        item_ids = set()
        station_ids = set()

        # Loop until we run out of transactions
        while True:
            if self.fetch_api(url, params) is False or self.root is None:
                return False

            rows = self.root.findall('result/rowset/row')
            # empty result set = no transactions ever on this wallet
            if not rows:
                break

            # Gather bulk data
            for row in rows:
                transaction_id = int(row.attrib['transactionID'])
                bulk_data[transaction_id] = row

                char_ids.add(int(row.attrib['clientID']))
                item_ids.add(int(row.attrib['typeID']))
                station_ids.add(int(row.attrib['stationID']))

                if self.apikey.key_type == APIKey.CORPORATION_TYPE:
                    char_ids.add(int(row.attrib['characterID']))

            # If we got MAX rows we should retrieve some more
            if len(rows) == TRANSACTION_ROWS:
                params['beforeTransID'] = transaction_id
            else:
                break

        # Retrieve any existing transactions
        t_ids = set(t_filter.filter(transaction_id__in=bulk_data.keys()).values_list('transaction_id', flat=True))

        # Fetch bulk data
        char_map = Character.objects.in_bulk(char_ids)
        corp_map = Corporation.objects.in_bulk(char_ids.difference(char_map))
        item_map = Item.objects.in_bulk(item_ids)
        station_map = Station.objects.in_bulk(station_ids)

        # Iterate over scary data
        new = []
        for transaction_id, row in bulk_data.items():
            transaction_time = self.parse_api_date(row.attrib['transactionDateTime'])

            # Skip corporate transactions if this is a personal call, we have no idea
            # what CorpWallet this transaction is related to otherwise :ccp:
            if (row.attrib['transactionFor'].lower() == 'corporation'
                    and self.apikey.key_type != APIKey.CORPORATION_TYPE):
                continue

            # Handle possible new clients
            client_id = int(row.attrib['clientID'])
            client = char_map.get(client_id, corp_map.get(client_id, None))
            if client is None:
                try:
                    client = Character.objects.create(
                        id=client_id,
                        name=row.attrib['clientName'],
                    )
                except IntegrityError:
                    client = Character.objects.get(id=client_id)

                char_map[client_id] = client

            # Check to see if this transaction already exists
            if transaction_id not in t_ids:
                # Make sure the item is valid
                item = item_map.get(int(row.attrib['typeID']))
                if item is None:
                    self.log_warn('Invalid item_id %s', row.attrib['typeID'])
                    continue

                # Make sure the station is valid
                station = station_map.get(int(row.attrib['stationID']))
                if station is None:
                    self.log_warn('Invalid station_id %s', row.attrib['stationID'])
                    continue

                # For a corporation key, make sure the character exists
                if self.apikey.key_type == APIKey.CORPORATION_TYPE:
                    char_id = int(row.attrib['characterID'])
                    char = char_map.get(char_id, None)
                    # Doesn't exist, create it
                    if char is None:
                        char = Character.objects.create(
                            id=char_id,
                            name=row.attrib['characterName'],
                            corporation=self.apikey.corporation,
                        )
                        char_map[char_id] = char
                # Any other key = just use the supplied character
                else:
                    char = character

                # Create a new transaction object and save it
                quantity = int(row.attrib['quantity'])
                price = Decimal(row.attrib['price'])
                buy_transaction = (row.attrib['transactionType'] == 'buy')

                t = Transaction(
                    station=station,
                    item=item,
                    character=char,
                    transaction_id=transaction_id,
                    date=transaction_time,
                    buy_transaction=buy_transaction,
                    quantity=quantity,
                    price=price,
                    total_price=quantity * price,
                )

                # Set the corp_wallet for corporation API requests
                if self.apikey.key_type == APIKey.CORPORATION_TYPE:
                    t.corp_wallet = corp_wallet

                # Set whichever client type is relevant
                if isinstance(client, Character):
                    t.other_char_id = client.id
                else:
                    t.other_corp_id = client.id

                new.append(t)

        # Create any new transaction objects
        if new:
            Transaction.objects.bulk_create(new)

        return True
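
`total_price=quantity * price` multiplies an int by a Decimal; the product stays exact (up to the context's 28 significant digits), where the equivalent float arithmetic drifts:

    from decimal import Decimal

    print(3 * Decimal('0.10'))  # 0.30 -- exact
    print(3 * 0.1)              # 0.30000000000000004 -- float drift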

Example 129

Project: evething Source File: assets.py
@login_required
def assets_filter(request):
    """Assets filter"""
    tt = TimerThing('assets')

    characters = Character.objects.filter(apikeys__user=request.user.id).distinct()
    character_ids = []
    character_map = {}
    for character in characters:
        character_ids.append(character.id)
        character_map[character.id] = character

    corp_ids = Corporation.get_ids_with_access(request.user, APIKey.CORP_ASSET_LIST_MASK)
    corporations = Corporation.objects.filter(pk__in=corp_ids)
    corporation_ids = []
    corporation_map = {}
    for corporation in corporations:
        corporation_ids.append(corporation.id)
        corporation_map[corporation.id] = corporation

    # apply our initial set of filters
    assets = Asset.objects.filter(
        Q(character__in=character_ids, corporation_id=0)
        |
        Q(corporation_id__in=corporation_ids)
    )
    assets = assets.prefetch_related('item__item_group__category', 'inv_flag', 'system', 'station')
    # assets = assets.distinct()

    tt.add_time('init')

    # Parse and apply filters
    filters = parse_filters(request, ASSETS_EXPECTED)

    if 'char' in filters:
        qs = []
        for fc, fv in filters['char']:
            if fc == 'eq':
                qs.append(Q(character=fv, corporation_id=0))
            elif fc == 'ne':
                qs.append(~Q(character=fv, corporation_id=0))
        assets = assets.filter(reduce(operator.ior, qs))

    if 'corp' in filters:
        qs = []
        for fc, fv in filters['corp']:
            if fc == 'eq':
                if fv == -1:
                    qs.append(Q(corporation_id__gt=0))
                else:
                    qs.append(Q(corporation_id=fv))
            elif fc == 'ne':
                if fv == -1:
                    qs.append(Q(corporation_id=0))
                else:
                    qs.append(~Q(corporation_id=fv))
        assets = assets.filter(reduce(operator.ior, qs))

    if 'invflag' in filters:
        qs = []
        for fc, fv in filters['invflag']:
            if fc == 'eq' and fv.isdigit():
                qs.append(Q(inv_flag_id=fv))
            elif fc == 'ne' and fv.isdigit():
                qs.append(~Q(inv_flag_id=fv))
            elif fc == 'in':
                qs.append(Q(inv_flag__name__icontains=fv))
        assets = assets.filter(reduce(operator.ior, qs))

    if 'item' in filters:
        qs = []
        for fc, fv in filters['item']:
            if fc == 'eq':
                qs.append(Q(item__name=fv))
            elif fc == 'ne':
                qs.append(~Q(item__name=fv))
            elif fc == 'in':
                qs.append(Q(item__name__icontains=fv))
        assets = assets.filter(reduce(operator.ior, qs))

    if 'itemcat' in filters:
        qs = []
        for fc, fv in filters['itemcat']:
            if fc == 'eq':
                if fv.isdigit():
                    qs.append(Q(item__item_group__category=fv))
                else:
                    qs.append(Q(item__item_group__category__name=fv))
            elif fc == 'ne':
                if fv.isdigit():
                    qs.append(~Q(item__item_group__category=fv))
                else:
                    qs.append(~Q(item__item_group__category__name=fv))
            elif fc == 'in':
                qs.append(Q(item__item_group__category__name__icontains=fv))
        assets = assets.filter(reduce(operator.ior, qs))

    if 'station' in filters:
        qs = []
        for fc, fv in filters['station']:
            if fc == 'eq':
                if fv.isdigit():
                    qs.append(Q(station=fv))
                else:
                    qs.append(Q(station__name=fv))
            elif fc == 'ne':
                if fv.isdigit():
                    qs.append(~Q(station=fv))
                else:
                    qs.append(~Q(station__name=fv))
            elif fc == 'in':
                qs.append(Q(station__name__icontains=fv))
        assets = assets.filter(reduce(operator.ior, qs))

    if 'system' in filters:
        qs = []
        for fc, fv in filters['system']:
            if fc == 'eq':
                if fv.isdigit():
                    qs.append(Q(system=fv))
                else:
                    qs.append(Q(system__name=fv))
            elif fc == 'ne':
                if fv.isdigit():
                    qs.append(~Q(system=fv))
                else:
                    qs.append(~Q(system__name=fv))
            elif fc == 'in':
                qs.append(Q(system__name__icontains=fv))
        assets = assets.filter(reduce(operator.ior, qs))

    tt.add_time('filters')

    asset_map = {}
    for asset in assets:
        asset_map[asset.asset_id] = asset

    tt.add_time('asset map')

    # do parent checks now, ugh
    recurse_you_fool = True
    recurse_assets = assets
    while recurse_you_fool:
        parents = set()
        for asset in recurse_assets:
            if asset.parent not in asset_map:
                parents.add(asset.parent)

        # found some orphan children, better go fetch some more assets
        if parents:
            recurse_assets = Asset.objects.filter(
                asset_id__in=parents,
            ).prefetch_related(
                'item__item_group__category',
                'inv_flag',
                'system',
                'station',
            )

            for asset in recurse_assets:
                asset.z_muted = True
                asset_map[asset.asset_id] = asset

        # No more orphans, escape
        else:
            recurse_you_fool = False

    # initialise data structures
    asset_lookup = {}
    loc_totals = {}
    systems = {}
    last_count = 999999999999999999

    while True:
        assets_now = asset_map.values()
        assets_len = len(assets_now)
        if assets_len == 0:
            break
        if assets_len == last_count:
            print 'infinite loop in assets?! %s' % (assets_len)
            break
        last_count = assets_len

        for asset in assets_now:
            # need to recurse this one later
            if asset.parent and asset_lookup.get(asset.parent) is None:
                continue

            asset.z_contents = []
            asset_lookup[asset.asset_id] = asset
            del asset_map[asset.asset_id]

            # skip missing character ids
            if asset.character_id not in character_map:
                continue

            # character and corporation
            asset.z_character = character_map.get(asset.character_id)
            asset.z_corporation = corporation_map.get(asset.corporation_id)

            # zz blueprints
            if asset.item.item_group.category.name == 'Blueprint':
                asset.z_blueprint = min(-1, asset.raw_quantity)
            else:
                asset.z_blueprint = 0

            # total value of this asset stack
            if asset.z_blueprint >= 0:
                # capital ships!
                if asset.item.item_group.name in ('Capital Industrial Ship', 'Carrier', 'Dreadnought', 'Supercarrier', 'Titan'):
                    asset.z_capital = True
                asset.z_price = asset.item.sell_price
            # BPOs use the base (NPC) price
            elif asset.z_blueprint == -1:
                asset.z_price = asset.item.base_price
            # BPCs count as 0 value for now
            else:
                asset.z_price = 0

            asset.z_total = asset.quantity * asset.z_price
            asset.z_volume = (asset.quantity * asset.item.volume).quantize(Decimal('0.01'))

            # work out if this is a system or station asset
            asset.z_k = asset.system_or_station()
            if asset.z_k not in systems:
                loc_totals[asset.z_k] = 0
                systems[asset.z_k] = []

            # base asset, always add
            if asset.parent == 0:
                asset.z_indent = 0

                loc_totals[asset.z_k] += asset.z_total
                systems[asset.z_k].append(asset)

            # asset is inside something, assign it to parent
            else:
                # parent doesn't exist yet
                parent = asset_lookup.get(asset.parent)
                if parent is None:
                    continue

                # add to parent contents
                parent.z_contents.append(asset)

                # add this to the parent entry in loc_totals
                loc_totals[asset.z_k] += asset.z_total

                # add the total value to every parent of this asset
                p = parent
                while p is not None:
                    p.z_total += asset.z_total
                    p = asset_lookup.get(p.parent)

                # guess at what indent level this should be
                asset.z_indent = getattr(parent, 'z_indent', 0) + 1

                # Celestials (containers) need some special casing
                if parent.item.item_group.category.name == 'Celestial':
                    asset.z_locked = (asset.inv_flag.name == 'Locked')

                    asset.z_type = asset.item.item_group.category.name

                else:
                    # inventory group
                    asset.z_slot = asset.inv_flag.nice_name()
                    # corporation hangar
                    if asset.z_corporation is not None and asset.z_slot.startswith('CorpSAG'):
                        asset.z_slot = getattr(asset.z_corporation, 'division%s' % (asset.z_slot[-1]))

    tt.add_time('main loop')

    # get a total asset value
    total_value = sum(loc_totals.values())

    # decorate/sort/undecorate for our strange sort requirements :(
    for system_name in systems:
        temp = [(asset.z_character.name.lower(), len(asset.z_contents) == 0, asset.item.name, asset.name, asset) for asset in systems[system_name]]
        temp.sort()
        systems[system_name] = [s[-1] for s in temp]

    sorted_systems = sorted(systems.items())

    tt.add_time('sort root')

    # recursively sort asset.z_contents
    for asset_set in systems.values():
        for asset in asset_set:
            _content_sort(asset)

    tt.add_time('sort contents')

    # Render template
    out = render_page(
        'thing/assets_filter.html',
        {
            'json_data': _json_data(characters, corporations, filters),
            'characters': characters,
            'corporations': corporations,
            'total_value': total_value,
            'systems': sorted_systems,
            'loc_totals': loc_totals,
        },
        request,
        character_ids,
        corporation_ids,
    )

    tt.add_time('template')
    if settings.DEBUG:
        tt.finished()

    return out
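
The `.quantize(Decimal('0.01'))` call used for `z_volume` is the canonical way to round a Decimal to two places: the exponent of the argument sets the target, and a rounding mode can be passed explicitly (the default is ROUND_HALF_EVEN). For example:

    from decimal import Decimal, ROUND_HALF_UP

    total = Decimal('86.425')
    print(total.quantize(Decimal('0.01')))                 # 86.42 -- half-even
    print(total.quantize(Decimal('0.01'), ROUND_HALF_UP))  # 86.43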

Example 130

Project: hwrt Source File: beam.py
Function: add_stroke
    def add_stroke(self, new_stroke):
        """
        Update the beam so that it considers `new_stroke`.

        When a `new_stroke` comes, it can either belong to a symbol for which
        at least one other stroke was already made or belong to a symbol for
        which `new_stroke` is the first stroke.

        The number of hypotheses after q strokes without pruning is

            f: N_0 -> N_0
            f(0) = 1
            f(1) = m
            f(q) = f(q-1)*(m+n)

        The number of times the single symbol classifier has to be called,
        when q hypotheses already exist:

            f_s: N_0 -> N_0
            f_s(q) = q*n + 1 (upper bound)

        Parameters
        ----------
        new_stroke : list of dicts
            A list of dicts [{'x': 12, 'y': 34, 'time': 56}, ...] which
            represent a point.
        """
        global single_clf
        if len(self.hypotheses) == 0:  # Don't put this in the constructor!
            self.hypotheses = [{'segmentation': [],
                                'symbols': [],
                                'geometry': {},
                                'probability': Decimal(1)
                                }]
        stroke_nr = len(self.history['data'])
        new_history = deepcopy(self.history)
        new_history['data'].append(new_stroke)
        new_beam = Beam()
        new_beam.history = new_history

        evaluated_segmentations = []

        # Get new guesses by assuming new_stroke belongs to an already begun
        # symbol
        had_multisymbol = False
        for hyp in self.hypotheses:
            # Add stroke to last n symbols (separately)
            for i in range(min(self.n, len(hyp['segmentation']))):
                # Build stroke data
                new_strokes = {'data': [], 'id': -1}
                for stroke_index in hyp['segmentation'][-(i+1)]:
                    curr_stroke = self.history['data'][stroke_index]
                    new_strokes['data'].append(curr_stroke)
                new_strokes['data'].append(new_stroke)

                new_seg = deepcopy(hyp['segmentation'])
                new_seg[-(i+1)].append(stroke_nr)

                if new_seg in evaluated_segmentations:
                    continue
                else:
                    evaluated_segmentations.append(new_seg)

                # Predict this new collection of strokes
                guesses = single_clf.predict(new_strokes)[:self.m]
                for guess in guesses:
                    if guess['semantics'].split(";")[1] == "::MULTISYMBOL::":
                        # This was a wrong segmentation. Ignore it.
                        had_multisymbol = True
                        continue
                    sym = {'symbol': guess['semantics'],
                           'probability': guess['probability']}
                    new_sym = deepcopy(hyp['symbols'])
                    new_sym[-(i+1)] = sym
                    b = {'segmentation': new_seg,
                         'symbols': new_sym,
                         'geometry': deepcopy(hyp['geometry']),
                         'probability': None
                         }
                    new_beam.hypotheses.append(b)

        if len(self.hypotheses) <= 1 or had_multisymbol:
            self._add_hypotheses_assuming_new_stroke(new_stroke,
                                                     stroke_nr,
                                                     new_beam)

        for hyp in new_beam.hypotheses:
            hyp['probability'] = _calc_hypothesis_probability(hyp)

        # Get probability again

        # Get geometry of each beam entry
        # TODO

        # Update probabilities
        # TODO

        # Normalize to sum=1
        self.hypotheses = new_beam.hypotheses
        self.history = new_beam.history
        self._prune()
        new_probs = softmax([h['probability']
                             for h in self.hypotheses])
        for hyp, prob in zip(self.hypotheses, new_probs):
            hyp['probability'] = prob
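
A wrinkle in this example: hypotheses start at `Decimal(1)`, yet downstream code such as the `softmax` call works in floats, and Decimal deliberately refuses to mix with float in arithmetic, so an explicit conversion is needed somewhere. For instance:

    from decimal import Decimal

    p = Decimal(1)
    try:
        p * 0.5                # Decimal * float raises TypeError by design
    except TypeError as exc:
        print('refused: %s' % exc)

    print(float(p) * 0.5)      # 0.5 -- convert explicitly where floats are needed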

Example 131

Project: plata Source File: paypal.py
    @csrf_exempt_m
    def ipn(self, request):
        if not request._read_started:
            if 'windows-1252' in request.body.decode('windows-1252', 'ignore'):
                if request.encoding != 'windows-1252':
                    request.encoding = 'windows-1252'
        else:  # middleware (or something else?) has triggered request reading
            if request.POST.get('charset') == 'windows-1252':
                if request.encoding != 'windows-1252':
                    # since the POST data has already been accessed,
                    # unicode characters may have already been lost and
                    # cannot be re-encoded.
                    # -- see https://code.djangoproject.com/ticket/14035
                    # Unfortunately, PayPal:
                    # a) defaults to windows-1252 encoding (why?!)
                    # b) doesn't indicate this in the Content-Type header
                    #    so Django cannot automatically detect it.
                    logger.warning(
                        'IPN received with charset=windows-1252, however '
                        'the request encoding does not match. It may be '
                        'impossible to verify this IPN if the data contains '
                        'non-ASCII characters. Please either '
                        'a) update your PayPal preferences to use UTF-8, or '
                        'b) configure your site so that IPN requests are '
                        'not read before they reach the handler'
                    )

        PAYPAL = settings.PAYPAL

        if PAYPAL['LIVE']:
            PP_URL = "https://www.paypal.com/cgi-bin/webscr"
        else:
            PP_URL = "https://www.sandbox.paypal.com/cgi-bin/webscr"

        parameters = None

        try:
            parameters = request.POST.copy()
            parameters_repr = repr(parameters).encode('utf-8')

            if parameters:
                logger.info(
                    'IPN: Processing request data %s' % parameters_repr)

                querystring = 'cmd=_notify-validate&%s' % (
                    request.POST.urlencode()
                )
                status = urlopen(PP_URL, querystring).read()

                if not status == b"VERIFIED":
                    logger.error(
                        'IPN: Received status %s, '
                        'could not verify parameters %s' % (
                            status,
                            parameters_repr
                        )
                    )
                    logger.debug('Destination: %r ? %r', PP_URL, querystring)
                    logger.debug('Request: %r', request)
                    return HttpResponseForbidden('Unable to verify')

            if parameters:
                logger.info('IPN: Verified request %s' % parameters_repr)
                reference = parameters['txn_id']
                invoice_id = parameters['invoice']
                currency = parameters['mc_currency']
                amount = parameters['mc_gross']

                try:
                    order, order_id, payment_id = invoice_id.split('-')
                except ValueError:
                    logger.error(
                        'IPN: Error getting order for %s' % invoice_id)
                    return HttpResponseForbidden('Malformed order ID')

                try:
                    order = self.shop.order_model.objects.get(pk=order_id)
                except (self.shop.order_model.DoesNotExist, ValueError):
                    logger.error('IPN: Order %s does not exist' % order_id)
                    return HttpResponseForbidden(
                        'Order %s does not exist' % order_id)

                try:
                    payment = order.payments.get(pk=payment_id)
                except (order.payments.model.DoesNotExist, ValueError):
                    payment = order.payments.model(
                        order=order,
                        payment_module=u'%s' % self.name,
                    )

                payment.status = OrderPayment.PROCESSED
                payment.currency = currency
                payment.amount = Decimal(amount)
                payment.data = request.POST.copy()
                payment.transaction_id = reference
                payment.payment_method = payment.payment_module

                if parameters['payment_status'] == 'Completed':
                    payment.authorized = timezone.now()
                    payment.status = OrderPayment.AUTHORIZED

                payment.save()
                order = order.reload()

                logger.info(
                    'IPN: Successfully processed IPN request for %s' % order)

                if payment.authorized and plata.settings.PLATA_STOCK_TRACKING:
                    StockTransaction = plata.stock_model()
                    self.create_transactions(
                        order,
                        _('sale'),
                        type=StockTransaction.SALE,
                        negative=True,
                        payment=payment)

                if not order.balance_remaining:
                    self.order_paid(order, payment=payment, request=request)

                return HttpResponse("Ok")

        except Exception as e:
            logger.error('IPN: Processing failure %s' % e)
            raise
        else:
            logger.warning('IPN received without POST parameters')
            return HttpResponseForbidden('No parameters provided')

Example 132

Project: plata Source File: base.py
    def create_product(self, stock=0):
        global PRODUCTION_CREATION_COUNTER
        PRODUCTION_CREATION_COUNTER += 1

        tax_class, tax_class_germany, tax_class_something =\
            self.create_tax_classes()

        Product = plata.product_model()
        product = Product.objects.create(
            name='Test Product %s' % PRODUCTION_CREATION_COUNTER,
        )

        if stock:
            product.stock_transactions.create(
                type=StockTransaction.PURCHASE,
                change=stock,
            )

        # An old price in CHF which should not influence the rest of the tests
        product.prices.create(
            currency='CHF',
            tax_class=tax_class,
            _unit_price=Decimal('99.90'),
            tax_included=True,
        )

        product.prices.create(
            currency='CHF',
            tax_class=tax_class,
            _unit_price=Decimal('199.90'),
            tax_included=True,
            # valid_from=date(2000, 1, 1),
            # valid_until=date(2001, 1, 1),
        )

        product.prices.create(
            currency='CHF',
            tax_class=tax_class,
            _unit_price=Decimal('299.90'),
            tax_included=True,
            # valid_from=date(2000, 1, 1),
        )

        product.prices.create(
            currency='CHF',
            tax_class=tax_class,
            _unit_price=Decimal('299.90'),
            tax_included=True,
            # valid_from=date(2000, 7, 1),
            # is_sale=True,
        )

        product.prices.create(
            currency='CHF',
            tax_class=tax_class,
            _unit_price=Decimal('79.90'),
            tax_included=True,
            # is_sale=True,
        )

        product.prices.create(
            currency='EUR',
            tax_class=tax_class_germany,
            _unit_price=Decimal('49.90'),
            tax_included=True,
        )

        product.prices.create(
            currency='CAD',
            tax_class=tax_class_something,
            _unit_price=Decimal('65.00'),
            tax_included=False,
        )

        """
        # A few prices which are not yet (or no longer) active
        product.prices.create(
            currency='CHF',
            tax_class=tax_class,
            _unit_price=Decimal('110.00'),
            tax_included=True,
            #is_active=False,
         )

        product.prices.create(
            currency='CHF',
            tax_class=tax_class,
            _unit_price=Decimal('120.00'),
            tax_included=True,
            is_active=True,
            valid_from=date(2100, 1, 1),
        )

        product.prices.create(
            currency='CHF',
            tax_class=tax_class,
            _unit_price=Decimal('130.00'),
            tax_included=True,
            is_active=True,
            valid_from=date(2000, 1, 1),
            valid_until=date(2001, 1, 1),
        )
        """

        return product
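
A side note on the Decimal usage in this fixture: every price is constructed as Decimal('99.90') from a string literal, which preserves the intended two decimal places exactly. A quick standalone sketch of the difference (plain Python, no plata models involved):

from decimal import Decimal

# String construction keeps the decimal value exact
unit_price = Decimal('99.90')
assert unit_price * 2 == Decimal('199.80')

# Float construction would inherit the float's binary representation
assert Decimal(99.90) != Decimal('99.90')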

Example 133

Project: encompass Source File: gtk.py
    def create_send_tab(self):
        
        page = vbox = Gtk.VBox()
        page.show()

        payto = Gtk.HBox()
        payto_label = Gtk.Label(label='Pay to:')
        payto_label.set_size_request(100,-1)
        payto.pack_start(payto_label, False, False, 0)
        payto_entry = Gtk.Entry()
        payto_entry.set_size_request(450, 26)
        payto.pack_start(payto_entry, False, False, 0)
        vbox.pack_start(payto, False, False, 5)

        message = Gtk.HBox()
        message_label = Gtk.Label(label='Description:')
        message_label.set_size_request(100,-1)
        message.pack_start(message_label, False, False, 0)
        message_entry = Gtk.Entry()
        message_entry.set_size_request(450, 26)
        message.pack_start(message_entry, False, False, 0)
        vbox.pack_start(message, False, False, 5)

        amount_box = Gtk.HBox()
        amount_label = Gtk.Label(label='Amount:')
        amount_label.set_size_request(100,-1)
        amount_box.pack_start(amount_label, False, False, 0)
        amount_entry = Gtk.Entry()
        amount_entry.set_size_request(120, -1)
        amount_box.pack_start(amount_entry, False, False, 0)
        vbox.pack_start(amount_box, False, False, 5)

        self.fee_box = fee_box = Gtk.HBox()
        fee_label = Gtk.Label(label='Fee:')
        fee_label.set_size_request(100,-1)
        fee_box.pack_start(fee_label, False, False, 0)
        fee_entry = Gtk.Entry()
        fee_entry.set_size_request(60, 26)
        fee_box.pack_start(fee_entry, False, False, 0)
        vbox.pack_start(fee_box, False, False, 5)

        end_box = Gtk.HBox()
        empty_label = Gtk.Label(label='')
        empty_label.set_size_request(100,-1)
        end_box.pack_start(empty_label, False, False, 0)
        send_button = Gtk.Button("Send")
        send_button.show()
        end_box.pack_start(send_button, False, False, 0)
        clear_button = Gtk.Button("Clear")
        clear_button.show()
        end_box.pack_start(clear_button, False, False, 15)
        send_button.connect("clicked", self.do_send, (payto_entry, message_entry, amount_entry, fee_entry))
        clear_button.connect("clicked", self.do_clear, (payto_entry, message_entry, amount_entry, fee_entry))

        vbox.pack_start(end_box, False, False, 5)

        # display this line only if there is a signature
        payto_sig = Gtk.HBox()
        payto_sig_id = Gtk.Label(label='')
        payto_sig.pack_start(payto_sig_id, False, False, 0)
        vbox.pack_start(payto_sig, True, True, 5)
        

        self.user_fee = False

        def entry_changed( entry, is_fee ):
            self.funds_error = False
            amount = numbify(amount_entry)
            fee = numbify(fee_entry)
            if not is_fee: fee = None
            if amount is None:
                return
            tx = self.wallet.make_unsigned_transaction([('op_return', 'dummy_tx', amount)], fee)
            if not is_fee:
                if tx:
                    fee = tx.get_fee()
                    fee_entry.set_text( str( Decimal( fee ) / 100000000 ) )
                    self.fee_box.show()
            if tx:
                amount_entry.modify_text(Gtk.StateType.NORMAL, Gdk.color_parse("#000000"))
                fee_entry.modify_text(Gtk.StateType.NORMAL, Gdk.color_parse("#000000"))
                send_button.set_sensitive(True)
            else:
                send_button.set_sensitive(False)
                amount_entry.modify_text(Gtk.StateType.NORMAL, Gdk.color_parse("#cc0000"))
                fee_entry.modify_text(Gtk.StateType.NORMAL, Gdk.color_parse("#cc0000"))
                self.funds_error = True

        amount_entry.connect('changed', entry_changed, False)
        fee_entry.connect('changed', entry_changed, True)        

        self.payto_entry = payto_entry
        self.payto_fee_entry = fee_entry
        self.payto_sig_id = payto_sig_id
        self.payto_sig = payto_sig
        self.amount_entry = amount_entry
        self.message_entry = message_entry
        self.add_tab(page, 'Send')
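
The Decimal usage here is Decimal(fee) / 100000000 in entry_changed, which turns an integer satoshi fee into a coin-denominated string for the fee entry. Because the fee is an int, the Decimal construction is exact, and dividing by 10**8 stays exact. A standalone sketch of that conversion (the function name is made up for illustration):

from decimal import Decimal

def satoshis_to_coins(fee):
    # fee is an integer number of satoshis; the division is exact in Decimal
    return Decimal(fee) / 100000000

assert str(satoshis_to_coins(10000)) == '0.0001'
assert str(satoshis_to_coins(150000000)) == '1.5'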

Example 134

Project: Arelle Source File: streamingExtensions.py
def streamingExtensionsLoader(modelXbrl, mappedUri, filepath):
    # check if big instance and has header with an initial incomplete tree walk (just 2 elements)
    def logSyntaxErrors(parsercontext):
        for error in parsercontext.error_log:
            modelXbrl.error("xmlSchema:syntax",
                    _("%(error)s, %(fileName)s, line %(line)s, column %(column)s, %(sourceAction)s source element"),
                    modelObject=modelDocument, fileName=os.path.basename(filepath),
                    error=error.message, line=error.line, column=error.column, sourceAction="streaming")
    #### note: written for iterparse of lxml prior to version 3.3, otherwise rewrite to use XmlPullParser ###
    #### note: iterparse wants a binary file, but file is text mode
    _file, = modelXbrl.fileSource.file(filepath, binary=True)
    startedAt = time.time()
    modelXbrl.profileActivity()
    parsercontext = etree.iterparse(_file, events=("start","end"), huge_tree=True)
    foundInstance = False
    foundErrors = False
    streamingAspects = None
    numRootFacts1 = 0
    numElts = 0
    elt = None
    for event, elt in parsercontext:
        if event == "start":
            if elt.getparent() is not None:
                if elt.getparent().tag == "{http://www.xbrl.org/2003/instance}xbrl":
                    if not foundInstance:
                        foundInstance = True
                        pi = precedingProcessingInstruction(elt, "xbrl-streamable-instance")
                        if pi is None:
                            break
                        else:
                            streamingAspects = dict(pi.attrib.copy())
                    if not elt.tag.startswith("{http://www.xbrl.org/"):
                        numRootFacts1 += 1
                        if numRootFacts1 % 1000 == 0:
                            modelXbrl.profileActivity("... streaming tree check", minTimeToShow=20.0)
                elif not foundInstance:       
                    break
            elif elt.tag == "{http://www.xbrl.org/2003/instance}xbrl" and precedingProcessingInstruction(elt, "xbrl-streamable-instance") is not None:
                modelXbrl.error("streamingExtensions:headerMisplaced",
                        _("Header is misplaced: %(error)s, must follow xbrli:xbrl element"),
                        modelObject=elt)
        elif event == "end":
            elt.clear()
            numElts += 1
            if numElts % 1000 == 0 and elt.getparent() is not None:
                while elt.getprevious() is not None and elt.getparent() is not None:
                    del elt.getparent()[0]
    if elt is not None:
        elt.clear()
    _file.seek(0,io.SEEK_SET) # allow reparsing
    if not foundInstance or streamingAspects is None:
        del elt, parsercontext
        _file.close()
        return None
    modelXbrl.profileStat(_("streaming tree check"), time.time() - startedAt)
    startedAt = time.time()
    try:
        version = Decimal(streamingAspects.get("version"))
        if int(version) != 1:
            modelXbrl.error("streamingExtensions:unsupportedVersion",
                    _("Streaming version %(version)s, major version number must be 1"),
                    modelObject=elt, version=version)
            foundErrors = True
    except (InvalidOperation, OverflowError):
        modelXbrl.error("streamingExtensions:versionError",
                _("Version %(version)s, number must be 1.n"),
                modelObject=elt, version=streamingAspects.get("version", "(none)"))
        foundErrors = True
    for bufAspect in ("contextBuffer", "unitBuffer", "footnoteBuffer"):
        try:
            bufLimit = Decimal(streamingAspects.get(bufAspect, "INF"))
            if bufLimit < 1 or (bufLimit.is_finite() and bufLimit % 1 != 0):
                raise InvalidOperation
            elif bufAspect == "contextBuffer":
                contextBufferLimit = bufLimit
            elif bufAspect == "unitBuffer":
                unitBufferLimit = bufLimit
            elif bufAspect == "footnoteBuffer":
                footnoteBufferLimit = bufLimit
        except InvalidOperation:
            modelXbrl.error("streamingExtensions:valueError",
                    _("Streaming %(attrib)s %(value)s, number must be a positive integer or INF"),
                    modelObject=elt, attrib=bufAspect, value=streamingAspects.get(bufAspect))
            foundErrors = True
    if parsercontext.error_log:
        foundErrors = True
    logSyntaxErrors(parsercontext)
    
    if foundErrors:
        _file.close()
        return None
    parsercontext = etree.iterparse(_file, events=("start","end"), huge_tree=True)
    _parser, _parserLookupName, _parserLookupClass = parser(modelXbrl,filepath)
    eltMdlObjs = {}
    beforeInstanceStream = True
    validator = None
    contextBuffer = []
    unitBuffer = []
    footnoteBuffer = []
    factBuffer = []
    numFacts = numRootFacts2 = 1
    for event, elt in parsercontext:
        if event == "start":
            mdlObj = _parser.makeelement(elt.tag, attrib=elt.attrib, nsmap=elt.nsmap)
            mdlObj.sourceline = elt.sourceline
            eltMdlObjs[elt] = mdlObj
            if elt.getparent() is None:
                modelDocument = ModelDocument(modelXbrl, Type.INSTANCE, mappedUri, filepath, etree.ElementTree(mdlObj))
                modelDocument.xmlRootElement = mdlObj
                modelXbrl.modelDocument = modelDocument # needed for incremental validation
                mdlObj.init(modelDocument)
                modelXbrl.info("streamingExtensions:streaming",
                               _("Stream processing this instance."),
                               modelObject = modelDocument)
            else:
                eltMdlObjs[elt.getparent()].append(mdlObj)
                mdlObj._init()
                ns = mdlObj.namespaceURI
                ln = mdlObj.localName
                if (beforeInstanceStream and (
                    (ns == XbrlConst.link and ln not in ("schemaRef", "linkbaseRef")) or
                    (ns == XbrlConst.xbrli and ln in ("context", "unit")) or
                    (ns not in (XbrlConst.link, XbrlConst.xbrli)))):
                    beforeInstanceStream = False
                    if _streamingExtensionsValidate:
                        validator = Validate(modelXbrl)
                        validator.instValidator.validate(modelXbrl, modelXbrl.modelManager.formulaOptions.typedParameters())
                    else: # need default dimensions
                        ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
            mdlObj = None # deref
                        
        elif event == "end":
            mdlObj = eltMdlObjs.pop(elt)
            if elt.text: # text available after child nodes processed
                mdlObj.text = elt.text
            ns = mdlObj.namespaceURI
            ln = mdlObj.localName
            parentMdlObj = mdlObj.getparent()
            if ns == XbrlConst.xbrli:
                if ln == "context":
                    if mdlObj.get("sticky"):
                        del mdlObj.attrib["sticky"]
                        modelDocument.contextDiscover(mdlObj)
                    else:
                        if _streamingExtensionsValidate and len(contextBuffer) >= contextBufferLimit:
                            # drop before adding as dropped may have same id as added
                            cntx = contextBuffer.pop(0)
                            dropContext(modelXbrl, cntx)
                            del parentMdlObj[parentMdlObj.index(cntx)]
                            cntx = None
                        modelDocument.contextDiscover(mdlObj)
                        if contextBufferLimit.is_finite():
                            contextBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        contextsToCheck = (mdlObj,)
                        validator.instValidator.checkContexts(contextsToCheck)
                        if modelXbrl.hasXDT:
                            validator.instValidator.checkContextsDimensions(contextsToCheck)
                        del contextsToCheck # dereference
                elif ln == "unit":
                    if _streamingExtensionsValidate and len(unitBuffer) >= unitBufferLimit:
                        # drop before adding as dropped may have same id as added
                        unit = unitBuffer.pop(0)
                        dropUnit(modelXbrl, unit)
                        del parentMdlObj[parentMdlObj.index(unit)]
                        unit = None 
                    modelDocument.unitDiscover(mdlObj)
                    if unitBufferLimit.is_finite():
                        unitBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        validator.instValidator.checkUnits( (mdlObj,) )
                elif ln == "xbrl": # end of docuement
                    # check remaining footnote refs
                    for footnoteLink in footnoteBuffer:
                        checkFootnoteHrefs(modelXbrl, footnoteLink)
                elt.clear()
            elif ns == XbrlConst.link:
                if ln in ("schemaRef", "linkbaseRef"):
                    modelDocument.discoverHref(mdlObj)
                elif ln in ("roleRef", "arcroleRef"):
                    modelDocument.linkbaseDiscover((mdlObj,), inInstance=True)
                elif ln == "footnoteLink":
                    footnoteLinks = (mdlObj,)
                    modelDocument.linkbaseDiscover(footnoteLinks, inInstance=True)
                    if footnoteBufferLimit.is_finite():
                        footnoteBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        validator.instValidator.checkLinks(footnoteLinks)
                        if len(footnoteBuffer) > footnoteBufferLimit:
                            # check that hrefObjects for locators were all satisfied
                            # drop before addition as dropped may have same id as added
                            footnoteLink = footnoteBuffer.pop(0)
                            checkFootnoteHrefs(modelXbrl, footnoteLink)
                            dropFootnoteLink(modelXbrl, footnoteLink)
                            del parentMdlObj[parentMdlObj.index(footnoteLink)]
                            footnoteLink = None
                    footnoteLinks = None
                elt.clear()
            elif parentMdlObj.qname == XbrlConst.qnXbrliXbrl:
                numRootFacts2 += 1
                modelDocument.factDiscover(mdlObj, modelXbrl.facts)
                XmlValidate.validate(modelXbrl, mdlObj)
                if _streamingExtensionsValidate:
                    factsToCheck = (mdlObj,)
                    validator.instValidator.checkFacts(factsToCheck)
                    if modelXbrl.hasXDT:
                        validator.instValidator.checkFactsDimensions(factsToCheck)
                    del factsToCheck
                    dropFact(modelXbrl, mdlObj, modelXbrl.facts)
                    del parentMdlObj[parentMdlObj.index(mdlObj)]
                if numRootFacts2 % 1000 == 0:
                    modelXbrl.profileActivity("... streaming fact {0} of {1} {2:.2f}%".format(numRootFacts2, numRootFacts1, 100.0 * numRootFacts2 / numRootFacts1), 
                                              minTimeToShow=20.0)
                # get rid of root element from iterparse's tree
                elt.clear()
                while elt.getprevious() is not None:  # cleans up any prior siblings
                    del elt.getparent()[0]
            mdlObj = None # deref
    logSyntaxErrors(parsercontext)
    del parsercontext
    if validator is not None:
        validator.close()
    _file.close()
    modelXbrl.profileStat(_("streaming complete"), time.time() - startedAt)
    return modelDocument
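
For the purposes of this page, the interesting Decimal pattern in this loader is the buffer-limit parsing: Decimal(streamingAspects.get(bufAspect, "INF")) accepts either a positive integer or the literal INF, and the combination of is_finite(), % 1, and InvalidOperation validates it. A reduced, self-contained version of that check (the aspects dict is made up for illustration):

from decimal import Decimal, InvalidOperation

def buffer_limit(aspects, name):
    # Returns a positive integral Decimal or Decimal('Infinity');
    # raises InvalidOperation for anything else (including unparseable strings).
    limit = Decimal(aspects.get(name, "INF"))
    if limit < 1 or (limit.is_finite() and limit % 1 != 0):
        raise InvalidOperation
    return limit

assert buffer_limit({}, "contextBuffer") == Decimal("Infinity")
assert buffer_limit({"unitBuffer": "10"}, "unitBuffer") == Decimal(10)

This works because Decimal parses INF natively and infinity compares greater than any finite value, so the later len(buffer) >= limit tests never trigger for an unbounded buffer.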

Example 135

Project: Arelle Source File: streamingExtensions.py
def streamingExtensionsLoader(modelXbrl, mappedUri, filepath, *args, **kwargs):
    # check if big instance and has header with an initial incomplete tree walk (just 2 elements)
    if not _streamingExtensionsCheck:
        return None
    
    # track whether modelXbrl has been validated by this streaming extension
    modelXbrl._streamingExtensionValidated = False
        
    def logSyntaxErrors(parsercontext):
        for error in parsercontext.error_log:
            modelXbrl.error("xmlSchema:syntax",
                    _("%(error)s, %(fileName)s, line %(line)s, column %(column)s, %(sourceAction)s source element"),
                    modelObject=modelXbrl, fileName=os.path.basename(filepath), 
                    error=error.message, line=error.line, column=error.column, sourceAction="streaming")
    #### note: written for iterparse of lxml prior to version 3.3, otherwise rewrite to use XmlPullParser ###
    #### note: iterparse wants a binary file, but file is text mode
    _file, = modelXbrl.fileSource.file(filepath, binary=True)
    startedAt = time.time()
    modelXbrl.profileActivity()
    ''' this seems twice as slow as iterparse
    class instInfoTarget():
        def __init__(self, element_factory=None, parser=None):
            self.newTree = True
            self.streamingAspects = None
            self.foundInstance = False
            self.creationSoftwareComment = ''
            self.currentEltTag = "(before xbrli:xbrl)"
            self.numRootFacts = 0
        def start(self, tag, attrib, nsmap=None):
            if self.newTree:
                if tag == "{http://www.xbrl.org/2003/instance}xbrl":
                    self.foundInstance = True
                    self.newTree = False
                else: # break 
                    raise NotInstanceDocumentException()
            elif not tag.startswith("{http://www.xbrl.org/"):
                self.numRootFacts += 1
                if self.numRootFacts % 1000 == 0:
                    modelXbrl.profileActivity("... streaming tree check", minTimeToShow=20.0)
            self.currentEltTag = tag
        def end(self, tag):
            pass
        def data(self, data):
            pass
        def comment(self, text):
            if not self.foundInstance: # accumulate comments before xbrli:xbrl
                self.creationSoftwareComment += ('\n' if self.creationSoftwareComment else '') + text
            elif not self.creationSoftwareComment:
                self.creationSoftwareComment = text # or first comment after xbrli:xbrl
        def pi(self, target, data):
            if target == "xbrl-streamable-instance":
                if self.currentEltTag == "{http://www.xbrl.org/2003/instance}xbrl":
                    self.streamingAspects = dict(etree.PI(target,data).attrib.copy()) # dereference target results
                else:
                    modelXbrl.error("streamingExtensions:headerMisplaced",
                            _("Header is misplaced: %(target)s, must follow xbrli:xbrl element but was found at %(element)s"),
                            modelObject=modelXbrl, target=target, element=self.currentEltTag)
        def close(self):
            if not self.creationSoftwareComment:
                self.creationSoftwareComment = None
            return True
    instInfo = instInfoTarget()
    infoParser = etree.XMLParser(recover=True, huge_tree=True, target=instInfo)
    try:
        etree.parse(_file, parser=infoParser, base_url=filepath)
    except NotInstanceDocumentException:
        pass
    '''
    foundErrors = False
    foundInstance = False
    streamingAspects = None
    creationSoftwareComment = None
    instInfoNumRootFacts = 0
    numElts = 0
    elt = None
    instInfoContext = etree.iterparse(_file, events=("start","end"), huge_tree=True)
    try:
        for event, elt in instInfoContext:
            if event == "start":
                if elt.getparent() is not None:
                    if elt.getparent().tag == "{http://www.xbrl.org/2003/instance}xbrl":
                        if not foundInstance:
                            foundInstance = True
                            pi = precedingProcessingInstruction(elt, "xbrl-streamable-instance")
                            if pi is None:
                                break
                            else:
                                streamingAspects = dict(pi.attrib.copy())
                                if creationSoftwareComment is None:
                                    creationSoftwareComment = precedingComment(elt)
                        if not elt.tag.startswith("{http://www.xbrl.org/"):
                            instInfoNumRootFacts += 1
                            if instInfoNumRootFacts % 1000 == 0:
                                modelXbrl.profileActivity("... streaming tree check", minTimeToShow=20.0)
                    elif not foundInstance:       
                        break
                elif elt.tag == "{http://www.xbrl.org/2003/instance}xbrl":
                    creationSoftwareComment = precedingComment(elt)
                    if precedingProcessingInstruction(elt, "xbrl-streamable-instance") is not None:
                        modelXbrl.error("streamingExtensions:headerMisplaced",
                                _("Header is misplaced: %(error)s, must follow xbrli:xbrl element"),
                                modelObject=elt)
            elif event == "end":
                elt.clear()
                numElts += 1
                if numElts % 1000 == 0 and elt.getparent() is not None:
                    while elt.getprevious() is not None and elt.getparent() is not None:
                        del elt.getparent()[0]
    except etree.XMLSyntaxError as err:
        modelXbrl.error("xmlSchema:syntax",
                _("Unrecoverable error: %(error)s"),
                error=err)
        _file.close()
        return err
        
    _file.seek(0,io.SEEK_SET) # allow reparsing
    if not foundInstance or streamingAspects is None:
        del elt
        _file.close()
        return None
    modelXbrl.profileStat(_("streaming tree check"), time.time() - startedAt)
    startedAt = time.time()
    try:
        version = Decimal(streamingAspects.get("version"))
        if int(version) != 1:
            modelXbrl.error("streamingExtensions:unsupportedVersion",
                    _("Streaming version %(version)s, major version number must be 1"),
                    modelObject=elt, version=version)
            foundErrors = True
    except (InvalidOperation, OverflowError):
        modelXbrl.error("streamingExtensions:versionError",
                _("Version %(version)s, number must be 1.n"),
                modelObject=elt, version=streamingAspects.get("version", "(none)"))
        foundErrors = True
    for bufAspect in ("contextBuffer", "unitBuffer", "footnoteBuffer"):
        try:
            bufLimit = Decimal(streamingAspects.get(bufAspect, "INF"))
            if bufLimit < 1 or (bufLimit.is_finite() and bufLimit % 1 != 0):
                raise InvalidOperation
            elif bufAspect == "contextBuffer":
                contextBufferLimit = bufLimit
            elif bufAspect == "unitBuffer":
                unitBufferLimit = bufLimit
            elif bufAspect == "footnoteBuffer":
                footnoteBufferLimit = bufLimit
        except InvalidOperation:
            modelXbrl.error("streamingExtensions:valueError",
                    _("Streaming %(attrib)s %(value)s, number must be a positive integer or INF"),
                    modelObject=elt, attrib=bufAspect, value=streamingAspects.get(bufAspect))
            foundErrors = True
    if _streamingExtensionsValidate:
        incompatibleValidations = []
        _validateDisclosureSystem = modelXbrl.modelManager.validateDisclosureSystem
        _disclosureSystem = modelXbrl.modelManager.disclosureSystem
        if _validateDisclosureSystem and _disclosureSystem.validationType == "EFM":
            incompatibleValidations.append("EFM")
        if _validateDisclosureSystem and _disclosureSystem.validationType == "GFM":
            incompatibleValidations.append("GFM")
        if _validateDisclosureSystem and _disclosureSystem.validationType == "HMRC":
            incompatibleValidations.append("HMRC")
        if modelXbrl.modelManager.validateCalcLB:
            incompatibleValidations.append("calculation LB")
        if incompatibleValidations:
            modelXbrl.error("streamingExtensions:incompatibleValidation",
                    _("Streaming instance validation does not support %(incompatibleValidations)s validation"),
                    modelObject=modelXbrl, incompatibleValidations=', '.join(incompatibleValidations))
            foundErrors = True
    if instInfoContext.error_log:
        foundErrors = True
    logSyntaxErrors(instInfoContext)
    del instInfoContext # dereference

    for pluginMethod in pluginClassMethods("Streaming.BlockStreaming"):
        _blockingPluginName = pluginMethod(modelXbrl)
        if _blockingPluginName: # name of blocking plugin is returned
            modelXbrl.error("streamingExtensions:incompatiblePlugIn",
                    _("Streaming instance not supported by plugin %(blockingPlugin)s"),
                    modelObject=modelXbrl, blockingPlugin=_blockingPluginName)
            foundErrors = True
    
    if foundErrors:
        _file.close()
        return None

    _encoding = XmlUtil.encoding(_file.read(512))
    _file.seek(0,io.SEEK_SET) # allow reparsing

    if _streamingExtensionsValidate:
        validator = Validate(modelXbrl)
        instValidator = validator.instValidator

    contextBuffer = []
    contextsToDrop = []
    unitBuffer = []
    unitsToDrop = []
    footnoteBuffer = []
    footnoteLinksToDrop = []
    
    _streamingFactsPlugin = any(True for pluginMethod in pluginClassMethods("Streaming.Facts"))
    _streamingValidateFactsPlugin = (_streamingExtensionsValidate and 
                                     any(True for pluginMethod in pluginClassMethods("Streaming.ValidateFacts")))

    ''' this is very much slower than iterparse
    class modelLoaderTarget():
        def __init__(self, element_factory=None, parser=None):
            self.newTree = True
            self.currentMdlObj = None
            self.beforeInstanceStream = True
            self.beforeStartStreamingPlugin = True
            self.numRootFacts = 1
            modelXbrl.makeelementParentModelObject = None
            modelXbrl.isStreamingMode = True
            self.factsCheckVersion = None
            self.factsCheckMd5s = Md5Sum()
        def start(self, tag, attrib, nsmap=None):
            modelXbrl.makeelementParentModelObject = self.currentMdlObj # pass parent to makeelement for ModelObjectFactory
            mdlObj = _parser.makeelement(tag, attrib=attrib, nsmap=nsmap)
            mdlObj.sourceline = 1
            if self.newTree:
                self.newTree = False
                self.currentMdlObj = mdlObj
                modelDocument = ModelDocument(modelXbrl, Type.INSTANCE, mappedUri, filepath, mdlObj.getroottree())
                modelXbrl.modelDocument = modelDocument # needed for incremental validation
                mdlObj.init(modelDocument)
                modelDocument.parser = _parser # needed for XmlUtil addChild's makeelement
                modelDocument.parserLookupName = _parserLookupName
                modelDocument.parserLookupClass = _parserLookupClass
                modelDocument.xmlRootElement = mdlObj
                modelDocument.schemaLocationElements.add(mdlObj)
                modelDocument.documentEncoding = _encoding
                modelDocument._creationSoftwareComment = creationSoftwareComment
                modelXbrl.info("streamingExtensions:streaming",
                               _("Stream processing this instance."),
                               modelObject = modelDocument)
            else:
                self.currentMdlObj.append(mdlObj)
                self.currentMdlObj = mdlObj
                mdlObj._init()
                ns = mdlObj.namespaceURI
                ln = mdlObj.localName
                if (self.beforeInstanceStream and (
                    (ns == XbrlConst.link and ln not in ("schemaRef", "linkbaseRef")) or
                    (ns == XbrlConst.xbrli and ln in ("context", "unit")) or
                    (ns not in (XbrlConst.link, XbrlConst.xbrli)))):
                    self.beforeInstanceStream = False
                    if _streamingExtensionsValidate:
                        instValidator.validate(modelXbrl, modelXbrl.modelManager.formulaOptions.typedParameters())
                    else: # need default dimensions
                        ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
                elif not self.beforeInstanceStream and self.beforeStartStreamingPlugin:
                    for pluginMethod in pluginClassMethods("Streaming.Start"):
                        pluginMethod(modelXbrl)
                    self.beforeStartStreamingPlugin = False
            return mdlObj
        def end(self, tag):
            modelDocument = modelXbrl.modelDocument
            mdlObj = self.currentMdlObj
            parentMdlObj = mdlObj.getparent()
            self.currentMdlObj = parentMdlObj
            ns = mdlObj.namespaceURI
            ln = mdlObj.localName
            if ns == XbrlConst.xbrli:
                if ln == "context":
                    if mdlObj.get("sticky"):
                        del mdlObj.attrib["sticky"]
                        XmlValidate.validate(modelXbrl, mdlObj)
                        modelDocument.contextDiscover(mdlObj)
                    else:
                        if _streamingExtensionsValidate and len(contextBuffer) >= contextBufferLimit:
                            # drop before adding as dropped may have same id as added
                            cntx = contextBuffer.pop(0)
                            if _streamingValidateFactsPlugin:
                                contextsToDrop.append(cntx)
                            else:
                                dropContext(modelXbrl, cntx)
                                del parentMdlObj[parentMdlObj.index(cntx)]
                            cntx = None
                        #>>XmlValidate.validate(modelXbrl, mdlObj)
                        #>>modelDocument.contextDiscover(mdlObj)
                        if contextBufferLimit.is_finite():
                            contextBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        contextsToCheck = (mdlObj,)
                        instValidator.checkContexts(contextsToCheck)
                        if modelXbrl.hasXDT:
                            instValidator.checkContextsDimensions(contextsToCheck)
                        del contextsToCheck # dereference
                elif ln == "unit":
                    if _streamingExtensionsValidate and len(unitBuffer) >= unitBufferLimit:
                        # drop before adding as dropped may have same id as added
                        unit = unitBuffer.pop(0)
                        if _streamingValidateFactsPlugin:
                            unitsToDrop.append(unit)
                        else:
                            dropUnit(modelXbrl, unit)
                            del parentMdlObj[parentMdlObj.index(unit)]
                        unit = None 
                    #>>XmlValidate.validate(modelXbrl, mdlObj)
                    #>>modelDocument.unitDiscover(mdlObj)
                    if unitBufferLimit.is_finite():
                        unitBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        instValidator.checkUnits( (mdlObj,) )
                elif ln == "xbrl": # end of docuement
                    # check remaining batched facts if any
                    if _streamingValidateFactsPlugin:
                        # plugin attempts to process batch of all root facts not yet processed (not just current one)
                        # finish any final batch of facts
                        if len(modelXbrl.facts) > 0:
                            factsToCheck = modelXbrl.facts.copy()
                            factsHaveBeenProcessed = True
                            # can block facts deletion if required data not yet available, such as numeric unit for DpmDB
                            for pluginMethod in pluginClassMethods("Streaming.ValidateFacts"):
                                if not pluginMethod(modelXbrl, factsToCheck):
                                    factsHaveBeenProcessed = False
                            if factsHaveBeenProcessed:
                                for fact in factsToCheck:
                                    dropFact(modelXbrl, fact, modelXbrl.facts)
                                    del parentMdlObj[parentMdlObj.index(fact)]
                                for cntx in contextsToDrop:
                                    dropContext(modelXbrl, cntx)
                                    del parentMdlObj[parentMdlObj.index(cntx)]
                                for unit in unitsToDrop:
                                    dropUnit(modelXbrl, unit)
                                    del parentMdlObj[parentMdlObj.index(unit)]
                                for footnoteLink in footnoteLinksToDrop:
                                    dropFootnoteLink(modelXbrl, footnoteLink)
                                    del parentMdlObj[parentMdlObj.index(footnoteLink)]
                                fact = cntx = unit = footnoteLink = None
                                del contextsToDrop[:]
                                del unitsToDrop[:]
                                del footnoteLinksToDrop[:]
                            del factsToCheck
                    # check remaining footnote refs
                    for footnoteLink in footnoteBuffer:
                        checkFootnoteHrefs(modelXbrl, footnoteLink)
                    for pluginMethod in pluginClassMethods("Streaming.Finish"):
                        pluginMethod(modelXbrl)
            elif ns == XbrlConst.link:
                if ln == "footnoteLink":
                    XmlValidate.validate(modelXbrl, mdlObj)
                    footnoteLinks = (mdlObj,)
                    modelDocument.linkbaseDiscover(footnoteLinks, inInstance=True)
                    if footnoteBufferLimit.is_finite():
                        footnoteBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        instValidator.checkLinks(footnoteLinks)
                        if len(footnoteBuffer) > footnoteBufferLimit:
                            # check that hrefObjects for locators were all satisfied
                            # drop before addition as dropped may have same id as added
                            footnoteLink = footnoteBuffer.pop(0)
                            checkFootnoteHrefs(modelXbrl, footnoteLink)
                            if _streamingValidateFactsPlugin:
                                footnoteLinksToDrop.append(footnoteLink)
                            else:
                                dropFootnoteLink(modelXbrl, footnoteLink)
                                del parentMdlObj[parentMdlObj.index(footnoteLink)]
                            footnoteLink = None
                    footnoteLinks = None
                elif ln in ("schemaRef", "linkbaseRef"):
                    modelDocument.discoverHref(mdlObj)
                elif not modelXbrl.skipDTS:
                    if ln in ("roleRef", "arcroleRef"):
                        modelDocument.linkbaseDiscover((mdlObj,), inInstance=True)
            elif parentMdlObj.qname == XbrlConst.qnXbrliXbrl:
                self.numRootFacts += 1
                #>>XmlValidate.validate(modelXbrl, mdlObj)
                #>>modelDocument.factDiscover(mdlObj, modelXbrl.facts)
                if self.factsCheckVersion:
                    self.factCheckFact(mdlObj)
                if _streamingExtensionsValidate or _streamingValidateFactsPlugin:
                    factsToCheck = (mdlObj,)  # validate current fact by itself
                    if _streamingExtensionsValidate:
                        instValidator.checkFacts(factsToCheck)
                        if modelXbrl.hasXDT:
                            instValidator.checkFactsDimensions(factsToCheck)
                    if _streamingValidateFactsPlugin:
                        # plugin attempts to process batch of all root facts not yet processed (not just current one)
                        # use batches of 1000 facts
                        if len(modelXbrl.facts) > 1000:
                            factsToCheck = modelXbrl.facts.copy()
                            factsHaveBeenProcessed = True
                            # can block facts deletion if required data not yet available, such as numeric unit for DpmDB
                            for pluginMethod in pluginClassMethods("Streaming.ValidateFacts"):
                                if not pluginMethod(modelXbrl, factsToCheck):
                                    factsHaveBeenProcessed = False
                            if factsHaveBeenProcessed:
                                for fact in factsToCheck:
                                    dropFact(modelXbrl, fact, modelXbrl.facts)
                                    del parentMdlObj[parentMdlObj.index(fact)]
                                for cntx in contextsToDrop:
                                    dropContext(modelXbrl, cntx)
                                    del parentMdlObj[parentMdlObj.index(cntx)]
                                for unit in unitsToDrop:
                                    dropUnit(modelXbrl, unit)
                                    del parentMdlObj[parentMdlObj.index(unit)]
                                for footnoteLink in footnoteLinksToDrop:
                                    dropFootnoteLink(modelXbrl, footnoteLink)
                                    del parentMdlObj[parentMdlObj.index(footnoteLink)]
                                fact = cntx = unit = footnoteLink = None
                                del contextsToDrop[:]
                                del unitsToDrop[:]
                                del footnoteLinksToDrop[:]
                            del factsToCheck # dereference fact or batch of facts
                    else:
                        dropFact(modelXbrl, mdlObj, modelXbrl.facts) # single fact has been processed
                        del parentMdlObj[parentMdlObj.index(mdlObj)]
                if self.numRootFacts % 1000 == 0:
                    pass
                    #modelXbrl.profileActivity("... streaming fact {0} of {1} {2:.2f}%".format(self.numRootFacts, instInfoNumRootFacts, 
                    #                                                                          100.0 * self.numRootFacts / instInfoNumRootFacts), 
                    #                          minTimeToShow=20.0)
                    gc.collect()
                    sys.stdout.write ("\rAt fact {} of {} mem {}".format(self.numRootFacts, instInfoNumRootFacts, modelXbrl.modelManager.cntlr.memoryUsed))
            return mdlObj
        def data(self, data):
            self.currentMdlObj.text = data
        def comment(self, text):
            pass
        def pi(self, target, data):
            if target == "xbrl-facts-check":
                _match = re.search("([\\w-]+)=[\"']([^\"']+)[\"']", data)
                if _match:
                    _matchGroups = _match.groups()
                    if len(_matchGroups) == 2:
                        if _matchGroups[0] == "version":
                            self.factsCheckVersion = _matchGroups[1]
                        elif _matchGroups[0] == "sum-of-fact-md5s":
                            try:
                                expectedMd5 = Md5Sum(_matchGroups[1])
                                if self.factsCheckMd5s != expectedMd5:
                                    modelXbrl.warning("streamingExtensions:xbrlFactsCheckWarning",
                                            _("XBRL facts sum of md5s expected %(expectedMd5)s not matched to actual sum %(actualMd5Sum)s"),
                                            modelObject=modelXbrl, expectedMd5=expectedMd5, actualMd5Sum=self.factsCheckMd5s)
                                else:
                                    modelXbrl.info("info",
                                            _("Successful XBRL facts sum of md5s."),
                                            modelObject=modelXbrl)
                            except ValueError:
                                modelXbrl.error("streamingExtensions:xbrlFactsCheckError",
                                        _("Invalid sum-of-md5s %(sumOfMd5)s"),
                                        modelObject=modelXbrl, sumOfMd5=_matchGroups[1])
        def close(self):
            del modelXbrl.makeelementParentModelObject
            return None
        
        def factCheckFact(self, fact):
            self.factsCheckMd5s += fact.md5sum
            for _tupleFact in fact.modelTupleFacts:
                self.factCheckFact(_tupleFact)
        
    _parser, _parserLookupName, _parserLookupClass = parser(modelXbrl, filepath, target=modelLoaderTarget())
    etree.parse(_file, parser=_parser, base_url=filepath)
    logSyntaxErrors(_parser)
    '''
    # replace modelLoaderTarget with iterparse (as it now supports CustomElementClassLookup)
    streamingParserContext = etree.iterparse(_file, events=("start","end"), huge_tree=True)
    from arelle.ModelObjectFactory import setParserElementClassLookup
    modelXbrl.isStreamingMode = True # must be set before setting element class lookup
    (_parser, _parserLookupName, _parserLookupClass) = setParserElementClassLookup(streamingParserContext, modelXbrl)
    foundInstance = False
    beforeInstanceStream = beforeStartStreamingPlugin = True
    numRootFacts = 0
    factsCheckVersion = None
    def factCheckFact(fact):
        modelDocuement._factsCheckMd5s += fact.md5sum
        for _tupleFact in fact.modelTupleFacts:
            factCheckFact(_tupleFact)
    for event, mdlObj in streamingParserContext:
        if event == "start":
            if mdlObj.tag == "{http://www.xbrl.org/2003/instance}xbrl":
                modelDocument = ModelDocument(modelXbrl, Type.INSTANCE, mappedUri, filepath, mdlObj.getroottree())
                modelXbrl.modelDocument = modelDocument # needed for incremental validation
                mdlObj.init(modelDocument)
                modelDocument.parser = _parser # needed for XmlUtil addChild's makeelement
                modelDocument.parserLookupName = _parserLookupName
                modelDocument.parserLookupClass = _parserLookupClass
                modelDocument.xmlRootElement = mdlObj
                modelDocument.schemaLocationElements.add(mdlObj)
                modelDocument.documentEncoding = _encoding
                modelDocument._creationSoftwareComment = precedingComment(mdlObj)
                modelDocument._factsCheckMd5s = Md5Sum()
                modelXbrl.info("streamingExtensions:streaming",
                               _("Stream processing this instance."),
                               modelObject = modelDocument)
            elif mdlObj.getparent() is not None:
                mdlObj._init() # requires discovery as part of start elements
                if mdlObj.getparent().tag == "{http://www.xbrl.org/2003/instance}xbrl":
                    if not foundInstance:
                        foundInstance = True
                        pi = precedingProcessingInstruction(mdlObj, "xbrl-facts-check")
                        if pi is not None:
                            factsCheckVersion = pi.attrib.get("version", None)
                elif not foundInstance:       
                    break
                ns = mdlObj.qname.namespaceURI
                ln = mdlObj.qname.localName
                if beforeInstanceStream:
                    if ((ns == XbrlConst.link and ln not in ("schemaRef", "linkbaseRef")) or
                        (ns == XbrlConst.xbrli and ln in ("context", "unit")) or
                        (ns not in (XbrlConst.link, XbrlConst.xbrli))):
                        beforeInstanceStream = False
                        if _streamingExtensionsValidate:
                            instValidator.validate(modelXbrl, modelXbrl.modelManager.formulaOptions.typedParameters())
                        else: # need default dimensions
                            ValidateXbrlDimensions.loadDimensionDefaults(modelXbrl)
                elif not beforeInstanceStream and beforeStartStreamingPlugin:
                    for pluginMethod in pluginClassMethods("Streaming.Start"):
                        pluginMethod(modelXbrl)
                    beforeStartStreamingPlugin = False
        elif event == "end":
            parentMdlObj = mdlObj.getparent()
            ns = mdlObj.namespaceURI
            ln = mdlObj.localName
            if ns == XbrlConst.xbrli:
                if ln == "context":
                    if mdlObj.get("sticky"):
                        del mdlObj.attrib["sticky"]
                        XmlValidate.validate(modelXbrl, mdlObj)
                        modelDocument.contextDiscover(mdlObj)
                    else:
                        if len(contextBuffer) >= contextBufferLimit:
                            # drop before adding as dropped may have same id as added
                            cntx = contextBuffer.pop(0)
                            if _streamingFactsPlugin or _streamingValidateFactsPlugin:
                                contextsToDrop.append(cntx)
                            else:
                                dropContext(modelXbrl, cntx)
                                #>>del parentMdlObj[parentMdlObj.index(cntx)]
                            cntx = None
                        XmlValidate.validate(modelXbrl, mdlObj)
                        modelDocument.contextDiscover(mdlObj)
                        if contextBufferLimit.is_finite():
                            contextBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        contextsToCheck = (mdlObj,)
                        instValidator.checkContexts(contextsToCheck)
                        if modelXbrl.hasXDT:
                            instValidator.checkContextsDimensions(contextsToCheck)
                        del contextsToCheck # dereference
                elif ln == "unit":
                    if len(unitBuffer) >= unitBufferLimit:
                        # drop before adding as dropped may have same id as added
                        unit = unitBuffer.pop(0)
                        if _streamingFactsPlugin or _streamingValidateFactsPlugin:
                            unitsToDrop.append(unit)
                        else:
                            dropUnit(modelXbrl, unit)
                            #>>del parentMdlObj[parentMdlObj.index(unit)]
                        unit = None 
                    XmlValidate.validate(modelXbrl, mdlObj)
                    modelDocument.unitDiscover(mdlObj)
                    if unitBufferLimit.is_finite():
                        unitBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        instValidator.checkUnits( (mdlObj,) )
                elif ln == "xbrl": # end of docuement
                    # check remaining batched facts if any
                    if _streamingFactsPlugin or _streamingValidateFactsPlugin:
                        # plugin attempts to process batch of all root facts not yet processed (not just current one)
                        # finish any final batch of facts
                        if len(modelXbrl.facts) > 0:
                            factsToCheck = modelXbrl.facts.copy()
                            # can block facts deletion if required data not yet available, such as numeric unit for DpmDB
                            if _streamingValidateFactsPlugin:
                                for pluginMethod in pluginClassMethods("Streaming.ValidateFacts"):
                                    pluginMethod(instValidator, factsToCheck)
                            if _streamingFactsPlugin:
                                for pluginMethod in pluginClassMethods("Streaming.Facts"):
                                    pluginMethod(modelXbrl, factsToCheck)
                            for fact in factsToCheck:
                                dropFact(modelXbrl, fact, modelXbrl.facts)
                                #>>del parentMdlObj[parentMdlObj.index(fact)]
                            for cntx in contextsToDrop:
                                dropContext(modelXbrl, cntx)
                                #>>del parentMdlObj[parentMdlObj.index(cntx)]
                            for unit in unitsToDrop:
                                dropUnit(modelXbrl, unit)
                                #>>del parentMdlObj[parentMdlObj.index(unit)]
                            for footnoteLink in footnoteLinksToDrop:
                                dropFootnoteLink(modelXbrl, footnoteLink)
                                #>>del parentMdlObj[parentMdlObj.index(footnoteLink)]
                            fact = cntx = unit = footnoteLink = None
                            del contextsToDrop[:]
                            del unitsToDrop[:]
                            del footnoteLinksToDrop[:]
                            del factsToCheck
                    # check remaining footnote refs
                    for footnoteLink in footnoteBuffer:
                        checkFootnoteHrefs(modelXbrl, footnoteLink)
                    pi = childProcessingInstruction(mdlObj, "xbrl-facts-check", reversed=True)
                    if pi is not None: # attrib is in .text, not attrib, no idea why!!!
                        _match = re.search("([\\w-]+)=[\"']([^\"']+)[\"']", pi.text)
                        if _match:
                            _matchGroups = _match.groups()
                            if len(_matchGroups) == 2:
                                if _matchGroups[0] == "sum-of-fact-md5s":
                                    try:
                                        expectedMd5 = Md5Sum(_matchGroups[1])
                                        if modelDocument._factsCheckMd5s != expectedMd5:
                                            modelXbrl.warning("streamingExtensions:xbrlFactsCheckWarning",
                                                    _("XBRL facts sum of md5s expected %(expectedMd5)s not matched to actual sum %(actualMd5Sum)s"),
                                                    modelObject=modelXbrl, expectedMd5=expectedMd5, actualMd5Sum=modelDocument._factsCheckMd5s)
                                        else:
                                            modelXbrl.info("info",
                                                    _("Successful XBRL facts sum of md5s."),
                                                    modelObject=modelXbrl)
                                    except ValueError:
                                        modelXbrl.error("streamingExtensions:xbrlFactsCheckError",
                                                _("Invalid sum-of-md5s %(sumOfMd5)s"),
                                                modelObject=modelXbrl, sumOfMd5=_matchGroups[1])
                    if _streamingValidateFactsPlugin:
                        for pluginMethod in pluginClassMethods("Streaming.ValidateFinish"):
                            pluginMethod(instValidator)
                    if _streamingFactsPlugin:
                        for pluginMethod in pluginClassMethods("Streaming.Finish"):
                            pluginMethod(modelXbrl)
            elif ns == XbrlConst.link:
                if ln in ("schemaRef", "linkbaseRef"):
                    modelDocument.discoverHref(mdlObj, urlRewritePluginClass="ModelDocument.InstanceSchemaRefRewriter")
                elif ln in ("roleRef", "arcroleRef"):
                    modelDocument.linkbaseDiscover((mdlObj,), inInstance=True)
                elif ln == "footnoteLink":
                    XmlValidate.validate(modelXbrl, mdlObj)
                    footnoteLinks = (mdlObj,)
                    modelDocument.linkbaseDiscover(footnoteLinks, inInstance=True)
                    if footnoteBufferLimit.is_finite():
                        footnoteBuffer.append(mdlObj)
                    if _streamingExtensionsValidate:
                        instValidator.checkLinks(footnoteLinks)
                        if len(footnoteBuffer) > footnoteBufferLimit:
                            # check that hrefObjects for locators were all satisfied
                            # drop before addition as dropped may have same id as added
                            footnoteLink = footnoteBuffer.pop(0)
                            checkFootnoteHrefs(modelXbrl, footnoteLink)
                            if _streamingValidateFactsPlugin:
                                footnoteLinksToDrop.append(footnoteLink)
                            else:
                                dropFootnoteLink(modelXbrl, footnoteLink)
                                #>>del parentMdlObj[parentMdlObj.index(footnoteLink)]
                            footnoteLink = None
                    footnoteLinks = None
            elif parentMdlObj.qname == XbrlConst.qnXbrliXbrl and isinstance(mdlObj, ModelFact):
                numRootFacts += 1
                XmlValidate.validate(modelXbrl, mdlObj)
                modelDocument.factDiscover(mdlObj, modelXbrl.facts)
                if factsCheckVersion:
                    factCheckFact(mdlObj)
                if _streamingExtensionsValidate or _streamingFactsPlugin or _streamingValidateFactsPlugin:
                    factsToCheck = (mdlObj,)  # validate current fact by itself
                    if _streamingExtensionsValidate:
                        instValidator.checkFacts(factsToCheck)
                        if modelXbrl.hasXDT:
                            instValidator.checkFactsDimensions(factsToCheck)
                    if _streamingFactsPlugin or _streamingValidateFactsPlugin:
                        # plugin attempts to process batch of all root facts not yet processed (not just current one)
                        # use batches of 1000 facts
                        if len(modelXbrl.facts) > 1000:
                            factsToCheck = modelXbrl.facts.copy()
                            # can block facts deletion if required data not yet available, such as numeric unit for DpmDB
                            if _streamingValidateFactsPlugin:
                                for pluginMethod in pluginClassMethods("Streaming.ValidateFacts"):
                                    pluginMethod(instValidator, factsToCheck)
                            if _streamingFactsPlugin:
                                for pluginMethod in pluginClassMethods("Streaming.Facts"):
                                    pluginMethod(modelXbrl, factsToCheck)
                            for fact in factsToCheck:
                                dropFact(modelXbrl, fact, modelXbrl.facts)
                                #>>del parentMdlObj[parentMdlObj.index(fact)]
                            for cntx in contextsToDrop:
                                dropContext(modelXbrl, cntx)
                                #>>del parentMdlObj[parentMdlObj.index(cntx)]
                            for unit in unitsToDrop:
                                dropUnit(modelXbrl, unit)
                                #>>del parentMdlObj[parentMdlObj.index(unit)]
                            for footnoteLink in footnoteLinksToDrop:
                                dropFootnoteLink(modelXbrl, footnoteLink)
                                #>>del parentMdlObj[parentMdlObj.index(footnoteLink)]
                            fact = cntx = unit = footnoteLink = None
                            del contextsToDrop[:]
                            del unitsToDrop[:]
                            del footnoteLinksToDrop[:]
                            del factsToCheck # dereference fact or batch of facts
                    else:
                        dropFact(modelXbrl, mdlObj, modelXbrl.facts) # single fact has been processed
                        #>>del parentMdlObj[parentMdlObj.index(mdlObj)]
                if numRootFacts % 1000 == 0:
                    pass
                    #modelXbrl.profileActivity("... streaming fact {0} of {1} {2:.2f}%".format(self.numRootFacts, instInfoNumRootFacts, 
                    #                                                                          100.0 * self.numRootFacts / instInfoNumRootFacts), 
                    #                          minTimeToShow=20.0)
                    #gc.collect()
                    #sys.stdout.write ("\rAt fact {} of {} mem {}".format(numRootFacts, instInfoNumRootFacts, modelXbrl.modelManager.cntlr.memoryUsed))
    if mdlObj is not None:
        mdlObj.clear()
    del _parser, _parserLookupName, _parserLookupClass                
                
    if _streamingExtensionsValidate and validator is not None:
        _file.close()
        del instValidator
        validator.close()
        # track that modelXbrl has been validated by this streaming extension
        modelXbrl._streamingExtensionValidated = True
        
    modelXbrl.profileStat(_("streaming complete"), time.time() - startedAt)
    return modelXbrl.modelDocument

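The streaming loop above caps memory by validating root facts in batches of 1000 and then dropping each processed batch (facts, contexts, units, footnote links). Below is a minimal sketch of that bounded-batch idea, independent of Arelle's API; stream_facts, process_batch and BATCH_SIZE are illustrative names, not part of the project.

from decimal import Decimal

BATCH_SIZE = 1000  # mirrors the 1000-fact batches used above

def stream_facts(raw_values, process_batch):
    batch = []
    total = Decimal(0)
    for raw in raw_values:
        batch.append(Decimal(raw))   # Decimal(str) preserves the lexical value exactly
        if len(batch) >= BATCH_SIZE:
            total += process_batch(batch)
            del batch[:]             # drop processed facts, as dropFact does above
    if batch:
        total += process_batch(batch)
    return total

print(stream_facts(["1.10", "2.20"], lambda batch: sum(batch, Decimal(0))))  # 3.30
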
Example 136

Project: Arelle Source File: ValidateXbrlCalcs.py
    def validate(self):
        if not self.modelXbrl.contexts and not self.modelXbrl.facts:
            return # skip if no contexts or facts
        
        if not self.inferDecimals: # inferring precision is now contrary to XBRL REC section 5.2.5.2
            self.modelXbrl.info("xbrl.5.2.5.2:inferringPrecision","Validating calculations inferring precision.")
            
        # identify equal contexts
        self.modelXbrl.profileActivity()
        uniqueContextHashes = {}
        for context in self.modelXbrl.contexts.values():
            h = context.contextDimAwareHash
            if h in uniqueContextHashes:
                if context.isEqualTo(uniqueContextHashes[h]):
                    self.mapContext[context] = uniqueContextHashes[h]
            else:
                uniqueContextHashes[h] = context
        del uniqueContextHashes
        self.modelXbrl.profileActivity("... identify equal contexts", minTimeToShow=1.0)

        # identify equal units
        uniqueUnitHashes = {}
        for unit in self.modelXbrl.units.values():
            h = unit.hash
            if h in uniqueUnitHashes:
                if unit.isEqualTo(uniqueUnitHashes[h]):
                    self.mapUnit[unit] = uniqueUnitHashes[h]
            else:
                uniqueUnitHashes[h] = unit
        self.modelXbrl.profileActivity("... identify equal units", minTimeToShow=1.0)
                    
        # identify concepts participating in essence-alias and requires-element relationships
        # identify calculation & essence-alias base sets (by key)
        for baseSetKey in self.modelXbrl.baseSets.keys():
            arcrole, ELR, linkqname, arcqname = baseSetKey
            if ELR and linkqname and arcqname:
                if arcrole in (XbrlConst.essenceAlias, XbrlConst.requiresElement):
                    conceptsSet = {XbrlConst.essenceAlias:self.conceptsInEssencesAlias,
                                   XbrlConst.requiresElement:self.conceptsInRequiresElement}[arcrole]
                    for modelRel in self.modelXbrl.relationshipSet(arcrole,ELR,linkqname,arcqname).modelRelationships:
                        for concept in (modelRel.fromModelObject, modelRel.toModelObject):
                            if concept is not None and concept.qname is not None:
                                conceptsSet.add(concept)
        self.modelXbrl.profileActivity("... identify requires-element and esseance-aliased concepts", minTimeToShow=1.0)

        self.bindFacts(self.modelXbrl.facts,[self.modelXbrl.modelDocument.xmlRootElement])
        self.modelXbrl.profileActivity("... bind facts", minTimeToShow=1.0)
        
        # identify calculation & essence-alias base sets (by key)
        for baseSetKey in self.modelXbrl.baseSets.keys():
            arcrole, ELR, linkqname, arcqname = baseSetKey
            if ELR and linkqname and arcqname:
                if arcrole in (XbrlConst.summationItem, XbrlConst.essenceAlias, XbrlConst.requiresElement):
                    relsSet = self.modelXbrl.relationshipSet(arcrole,ELR,linkqname,arcqname)
                    if arcrole == XbrlConst.summationItem:
                        fromRelationships = relsSet.fromModelObjects()
                        for sumConcept, modelRels in fromRelationships.items():
                            sumBindingKeys = self.sumConceptBindKeys[sumConcept]
                            dupBindingKeys = set()
                            boundSumKeys = set()
                            # determine boundSums
                            for modelRel in modelRels:
                                itemConcept = modelRel.toModelObject
                                if itemConcept is not None and itemConcept.qname is not None:
                                    itemBindingKeys = self.itemConceptBindKeys[itemConcept]
                                    boundSumKeys |= sumBindingKeys & itemBindingKeys
                            # add up rounded items
                            boundSums = defaultdict(decimal.Decimal) # sum of facts meeting factKey
                            boundSummationItems = defaultdict(list) # corresponding fact refs for messages
                            for modelRel in modelRels:
                                weight = modelRel.weightDecimal
                                itemConcept = modelRel.toModelObject
                                if itemConcept is not None:
                                    for itemBindKey in boundSumKeys:
                                        ancestor, contextHash, unit = itemBindKey
                                        factKey = (itemConcept, ancestor, contextHash, unit)
                                        if factKey in self.itemFacts:
                                            for fact in self.itemFacts[factKey]:
                                                if fact in self.duplicatedFacts:
                                                    dupBindingKeys.add(itemBindKey)
                                                elif fact not in self.consistentDupFacts:
                                                    roundedValue = roundFact(fact, self.inferDecimals)
                                                    boundSums[itemBindKey] += roundedValue * weight
                                                    boundSummationItems[itemBindKey].append(wrappedFactWithWeight(fact,weight,roundedValue))
                            for sumBindKey in boundSumKeys:
                                ancestor, contextHash, unit = sumBindKey
                                factKey = (sumConcept, ancestor, contextHash, unit)
                                if factKey in self.sumFacts:
                                    sumFacts = self.sumFacts[factKey]
                                    for fact in sumFacts:
                                        if fact in self.duplicatedFacts:
                                            dupBindingKeys.add(sumBindKey)
                                        elif sumBindKey not in dupBindingKeys and fact not in self.consistentDupFacts:
                                            roundedSum = roundFact(fact, self.inferDecimals)
                                            roundedItemsSum = roundFact(fact, self.inferDecimals, vDecimal=boundSums[sumBindKey])
                                            if roundedItemsSum != roundedSum:
                                                d = inferredDecimals(fact)
                                                if isnan(d) or isinf(d): d = 4
                                                _boundSummationItems = boundSummationItems[sumBindKey]
                                                unreportedContributingItemQnames = [] # list the missing/unreported contributors in relationship order
                                                for modelRel in modelRels:
                                                    itemConcept = modelRel.toModelObject
                                                    if (itemConcept is not None and 
                                                        (itemConcept, ancestor, contextHash, unit) not in self.itemFacts):
                                                        unreportedContributingItemQnames.append(str(itemConcept.qname))
                                                self.modelXbrl.log('INCONSISTENCY', "xbrl.5.2.5.2:calcInconsistency",
                                                    _("Calculation inconsistent from %(concept)s in link role %(linkrole)s reported sum %(reportedSum)s computed sum %(computedSum)s context %(contextID)s unit %(unitID)s unreportedContributingItems %(unreportedContributors)s"),
                                                    modelObject=wrappedSummationAndItems(fact, roundedSum, _boundSummationItems),
                                                    concept=sumConcept.qname, linkrole=ELR, 
                                                    linkroleDefinition=self.modelXbrl.roleTypeDefinition(ELR),
                                                    reportedSum=Locale.format_decimal(self.modelXbrl.locale, roundedSum, 1, max(d,0)),
                                                    computedSum=Locale.format_decimal(self.modelXbrl.locale, roundedItemsSum, 1, max(d,0)), 
                                                    contextID=fact.context.id, unitID=fact.unit.id,
                                                    unreportedContributors=", ".join(unreportedContributingItemQnames) or "none")
                                                del unreportedContributingItemQnames[:]
                            boundSummationItems.clear() # dereference facts in list
                    elif arcrole == XbrlConst.essenceAlias:
                        for modelRel in relsSet.modelRelationships:
                            essenceConcept = modelRel.fromModelObject
                            aliasConcept = modelRel.toModelObject
                            essenceBindingKeys = self.esAlConceptBindKeys[essenceConcept]
                            aliasBindingKeys = self.esAlConceptBindKeys[aliasConcept]
                            for esAlBindKey in essenceBindingKeys & aliasBindingKeys:
                                ancestor, contextHash = esAlBindKey
                                essenceFactsKey = (essenceConcept, ancestor, contextHash)
                                aliasFactsKey = (aliasConcept, ancestor, contextHash)
                                if essenceFactsKey in self.esAlFacts and aliasFactsKey in self.esAlFacts:
                                    for eF in self.esAlFacts[essenceFactsKey]:
                                        for aF in self.esAlFacts[aliasFactsKey]:
                                            essenceUnit = self.mapUnit.get(eF.unit,eF.unit)
                                            aliasUnit = self.mapUnit.get(aF.unit,aF.unit)
                                            if essenceUnit != aliasUnit:
                                                self.modelXbrl.log('INCONSISTENCY', "xbrl.5.2.6.2.2:essenceAliasUnitsInconsistency",
                                                    _("Essence-Alias inconsistent units from %(essenceConcept)s to %(aliasConcept)s in link role %(linkrole)s context %(contextID)s"),
                                                    modelObject=(modelRel, eF, aF), 
                                                    essenceConcept=essenceConcept.qname, aliasConcept=aliasConcept.qname, 
                                                    linkrole=ELR, 
                                                    linkroleDefinition=self.modelXbrl.roleTypeDefinition(ELR),
                                                    contextID=eF.context.id)
                                            if not XbrlUtil.vEqual(eF, aF):
                                                self.modelXbrl.log('INCONSISTENCY', "xbrl.5.2.6.2.2:essenceAliasUnitsInconsistency",
                                                    _("Essence-Alias inconsistent value from %(essenceConcept)s to %(aliasConcept)s in link role %(linkrole)s context %(contextID)s"),
                                                    modelObject=(modelRel, eF, aF), 
                                                    essenceConcept=essenceConcept.qname, aliasConcept=aliasConcept.qname, 
                                                    linkrole=ELR,
                                                    linkroleDefinition=self.modelXbrl.roleTypeDefinition(ELR),
                                                    contextID=eF.context.id)
                    elif arcrole == XbrlConst.requiresElement:
                        for modelRel in relsSet.modelRelationships:
                            sourceConcept = modelRel.fromModelObject
                            requiredConcept = modelRel.toModelObject
                            if sourceConcept in self.requiresElementFacts and \
                               not requiredConcept in self.requiresElementFacts:
                                    self.modelXbrl.log('INCONSISTENCY', "xbrl.5.2.6.2.4:requiresElementInconsistency",
                                        _("Requires-Element %(requiringConcept)s missing required fact for %(requiredConcept)s in link role %(linkrole)s"),
                                        modelObject=sourceConcept, 
                                        requiringConcept=sourceConcept.qname, requiredConcept=requiredConcept.qname, 
                                        linkrole=ELR,
                                        linkroleDefinition=self.modelXbrl.roleTypeDefinition(ELR))
        self.modelXbrl.profileActivity("... find inconsistencies", minTimeToShow=1.0)
        self.modelXbrl.profileActivity() # reset

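The summation-item check above accumulates weighted, rounded fact values in a defaultdict(decimal.Decimal), so a missing binding key starts from an exact zero. A self-contained sketch of that accumulation, with made-up keys and values:

import decimal
from collections import defaultdict

# Hypothetical (binding key, rounded value, weight) triples.
items = [("k1", decimal.Decimal("100.005"), decimal.Decimal("1")),
         ("k1", decimal.Decimal("49.995"), decimal.Decimal("-1"))]

boundSums = defaultdict(decimal.Decimal)   # missing keys start at an exact Decimal('0')
for key, value, weight in items:
    boundSums[key] += value * weight       # exact decimal arithmetic, no float drift

print(boundSums["k1"])   # 50.010
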
Example 137

Project: Arelle Source File: XmlValidate.py
def validateValue(modelXbrl, elt, attrTag, baseXsdType, value, isNillable=False, isNil=False, facets=None):
    if baseXsdType:
        try:
            '''
            if (len(value) == 0 and attrTag is None and not isNillable and 
                baseXsdType not in ("anyType", "string", "normalizedString", "token", "NMTOKEN", "anyURI", "noContent")):
                raise ValueError("missing value for not nillable element")
            '''
            xValid = VALID
            whitespaceReplace = (baseXsdType == "normalizedString")
            whitespaceCollapse = (not whitespaceReplace and baseXsdType != "string")
            isList = baseXsdType in {"IDREFS", "ENTITIES", "NMTOKENS"}
            if isList:
                baseXsdType = baseXsdType[:-1] # remove plural
            pattern = baseXsdTypePatterns.get(baseXsdType)
            if facets:
                if "pattern" in facets:
                    pattern = facets["pattern"]
                    # note multiple patterns are or'ed together, which isn't yet implemented!
                if "whiteSpace" in facets:
                    whitespaceReplace, whitespaceCollapse = {"preserve":(False,False), "replace":(True,False), "collapse":(False,True)}[facets["whiteSpace"]]
            if whitespaceReplace:
                value = normalizeWhitespacePattern.sub(' ', value)
            elif whitespaceCollapse:
                value = collapseWhitespacePattern.sub(' ', value.strip())
            if baseXsdType == "noContent":
                if len(value) > 0 and not value.isspace():
                    raise ValueError("value content not permitted")
                # note that sValue and xValue are not innerText but only text elements on specific element (or attribute)
                xValue = sValue = None
                xValid = VALID_NO_CONTENT # notify others that element may contain subelements (for stringValue needs)
            elif not value and isNil and isNillable: # rest of types get None if nil/empty value
                xValue = sValue = None
            else:
                if pattern is not None:
                    if ((isList and any(pattern.match(v) is None for v in value.split())) or
                        (not isList and pattern.match(value) is None)):
                        raise ValueError("pattern facet " + facets["pattern"].pattern if facets and "pattern" in facets else "pattern mismatch")
                if facets:
                    if "enumeration" in facets and value not in facets["enumeration"]:
                        raise ValueError("{0} is not in {1}".format(value, facets["enumeration"].keys()))
                    if "length" in facets and len(value) != facets["length"]:
                        raise ValueError("length {0}, expected {1}".format(len(value), facets["length"]))
                    if "minLength" in facets and len(value) < facets["minLength"]:
                        raise ValueError("length {0}, minLength {1}".format(len(value), facets["minLength"]))
                    if "maxLength" in facets and len(value) > facets["maxLength"]:
                        raise ValueError("length {0}, maxLength {1}".format(len(value), facets["maxLength"]))
                if baseXsdType in {"string", "normalizedString", "language", "token", "NMTOKEN","Name","NCName","IDREF","ENTITY"}:
                    xValue = sValue = value
                elif baseXsdType == "ID":
                    xValue = sValue = value
                    xValid = VALID_ID
                elif baseXsdType == "anyURI":
                    if value:  # allow empty strings to be valid anyURIs
                        if UrlUtil.relativeUrlPattern.match(value) is None:
                            raise ValueError("IETF RFC 2396 4.3 syntax")
                    # encode PSVI xValue similarly to Xerces and other implementations
                    xValue = anyURI(UrlUtil.anyUriQuoteForPSVI(value))
                    sValue = value
                elif baseXsdType in ("decimal", "float", "double"):
                    if baseXsdType == "decimal":
                        if decimalPattern.match(value) is None:
                            raise ValueError("lexical pattern mismatch")
                        xValue = Decimal(value)
                        sValue = float(value) # s-value uses Number (float) representation
                    else:
                        if floatPattern.match(value) is None:
                            raise ValueError("lexical pattern mismatch")
                        xValue = sValue = float(value)
                    if facets:
                        if "totalDigits" in facets and len(value.replace(".","")) > facets["totalDigits"]:
                            raise ValueError("totalDigits facet {0}".format(facets["totalDigits"]))
                        if "fractionDigits" in facets and ( '.' in value and
                            len(value[value.index('.') + 1:]) > facets["fractionDigits"]):
                            raise ValueError("fraction digits facet {0}".format(facets["fractionDigits"]))
                        if "maxInclusive" in facets and xValue > facets["maxInclusive"]:
                            raise ValueError(" > maxInclusive {0}".format(facets["maxInclusive"]))
                        if "maxExclusive" in facets and xValue >= facets["maxExclusive"]:
                            raise ValueError(" >= maxInclusive {0}".format(facets["maxExclusive"]))
                        if "minInclusive" in facets and xValue < facets["minInclusive"]:
                            raise ValueError(" < minInclusive {0}".format(facets["minInclusive"]))
                        if "minExclusive" in facets and xValue <= facets["minExclusive"]:
                            raise ValueError(" <= minExclusive {0}".format(facets["minExclusive"]))
                elif baseXsdType in {"integer",
                                     "nonPositiveInteger","negativeInteger","nonNegativeInteger","positiveInteger",
                                     "long","unsignedLong",
                                     "int","unsignedInt",
                                     "short","unsignedShort",
                                     "byte","unsignedByte"}:
                    xValue = sValue = _INT(value)
                    if ((baseXsdType in {"nonNegativeInteger","unsignedLong","unsignedInt"} 
                         and xValue < 0) or
                        (baseXsdType == "nonPositiveInteger" and xValue > 0) or
                        (baseXsdType == "positiveInteger" and xValue <= 0) or
                        (baseXsdType == "byte" and not -128 <= xValue < 127) or
                        (baseXsdType == "unsignedByte" and not 0 <= xValue < 255) or
                        (baseXsdType == "short" and not -32768 <= xValue < 32767) or
                        (baseXsdType == "unsignedShort" and not 0 <= xValue < 65535) or
                        (baseXsdType == "positiveInteger" and xValue <= 0)):
                        raise ValueError("{0} is not {1}".format(value, baseXsdType))
                    if facets:
                        if "totalDigits" in facets and len(value.replace(".","")) > facets["totalDigits"]:
                            raise ValueError("totalDigits facet {0}".format(facets["totalDigits"]))
                        if "fractionDigits" in facets and ( '.' in value and
                            len(value[value.index('.') + 1:]) > facets["fractionDigits"]):
                            raise ValueError("fraction digits facet {0}".format(facets["fractionDigits"]))
                        if "maxInclusive" in facets and xValue > facets["maxInclusive"]:
                            raise ValueError(" > maxInclusive {0}".format(facets["maxInclusive"]))
                        if "maxExclusive" in facets and xValue >= facets["maxExclusive"]:
                            raise ValueError(" >= maxInclusive {0}".format(facets["maxExclusive"]))
                        if "minInclusive" in facets and xValue < facets["minInclusive"]:
                            raise ValueError(" < minInclusive {0}".format(facets["minInclusive"]))
                        if "minExclusive" in facets and xValue <= facets["minExclusive"]:
                            raise ValueError(" <= minExclusive {0}".format(facets["minExclusive"]))
                elif baseXsdType == "boolean":
                    if value in ("true", "1"):  
                        xValue = sValue = True
                    elif value in ("false", "0"): 
                        xValue = sValue = False
                    else: raise ValueError
                elif baseXsdType == "QName":
                    xValue = qnameEltPfxName(elt, value, prefixException=ValueError)
                    #xValue = qname(elt, value, castException=ValueError, prefixException=ValueError)
                    sValue = value
                    ''' not sure here, how are explicitDimensions validated, but bad units not?
                    if xValue.namespaceURI in modelXbrl.namespaceDocs:
                        if (xValue not in modelXbrl.qnameConcepts and 
                            xValue not in modelXbrl.qnameTypes and
                            xValue not in modelXbrl.qnameAttributes and
                            xValue not in modelXbrl.qnameAttributeGroups):
                            raise ValueError("qname not defined " + str(xValue))
                    '''
                elif baseXsdType in ("XBRLI_DECIMALSUNION", "XBRLI_PRECISIONUNION"):
                    xValue = sValue = value if value == "INF" else _INT(value)
                elif baseXsdType in ("XBRLI_NONZERODECIMAL"):
                    xValue = sValue = _INT(value)
                    if xValue == 0:
                        raise ValueError("invalid value")
                elif baseXsdType == "XBRLI_DATEUNION":
                    xValue = dateTime(value, type=DATEUNION, castException=ValueError)
                    sValue = value
                elif baseXsdType == "dateTime":
                    xValue = dateTime(value, type=DATETIME, castException=ValueError)
                    sValue = value
                elif baseXsdType == "date":
                    xValue = dateTime(value, type=DATE, castException=ValueError)
                    sValue = value
                elif baseXsdType == "regex-pattern":
                    # for facet compiling
                    try:
                        sValue = value
                        if value in xmlSchemaPatterns:
                            xValue = xmlSchemaPatterns[value]
                        else:
                            if r"\i" in value or r"\c" in value:
                                value = value.replace(r"\i", iNameChar).replace(r"\c", cNameChar)
                            xValue = re_compile(value + "$") # must match whole string
                    except Exception as err:
                        raise ValueError(err)
                elif baseXsdType == "fraction":
                    sValue = value
                    xValue = Fraction("/".join(elt.fractionValue))
                else:
                    if baseXsdType in lexicalPatterns:
                        match = lexicalPatterns[baseXsdType].match(value)
                        if match is None:
                            raise ValueError("lexical pattern mismatch")
                        if baseXsdType == "gMonthDay":
                            month, day, zSign, zHrMin, zHr, zMin = match.groups()
                            if int(day) > {2:29, 4:30, 6:30, 9:30, 11:30, 1:31, 3:31, 5:31, 7:31, 8:31, 10:31, 12:31}[int(month)]:
                                raise ValueError("invalid day {0} for month {1}".format(day, month))
                            xValue = gMonthDay(month, day)
                        elif baseXsdType == "gYearMonth":
                            year, month, zSign, zHrMin, zHr, zMin = match.groups()
                            xValue = gYearMonth(year, month)
                        elif baseXsdType == "gYear":
                            year, zSign, zHrMin, zHr, zMin = match.groups()
                            xValue = gYear(year)
                        elif baseXsdType == "gMonth":
                            month, zSign, zHrMin, zHr, zMin = match.groups()
                            xValue = gMonth(month)
                        elif baseXsdType == "gDay":
                            day, zSign, zHrMin, zHr, zMin = match.groups()
                            xValue = gDay(day)
                        else:
                            xValue = value
                    else: # no lexical pattern, forget compiling value
                        xValue = value
                    sValue = value
        except (ValueError, InvalidOperation) as err:
            if ModelInlineValueObject is not None and isinstance(elt, ModelInlineValueObject):
                errElt = "{0} fact {1}".format(elt.elementQname, elt.qname)
            else:
                errElt = elt.elementQname
            if attrTag:
                modelXbrl.error("xmlSchema:valueError",
                    _("Element %(element)s attribute %(attribute)s type %(typeName)s value error: %(value)s, %(error)s"),
                    modelObject=elt,
                    element=errElt,
                    attribute=XmlUtil.clarkNotationToPrefixedName(elt,attrTag,isAttribute=True),
                    typeName=baseXsdType,
                    value=strTruncate(value, 30),
                    error=err)
            else:
                modelXbrl.error("xmlSchema:valueError",
                    _("Element %(element)s type %(typeName)s value error: %(value)s, %(error)s"),
                    modelObject=elt,
                    element=errElt,
                    typeName=baseXsdType,
                    value=strTruncate(value, 30),
                    error=err)
            xValue = None
            sValue = value
            xValid = INVALID
    else:
        xValue = sValue = None
        xValid = UNKNOWN
    if attrTag:
        try:  # dynamically allocate attributes (otherwise given shared empty set)
            xAttributes = elt.xAttributes
        except AttributeError:
            elt.xAttributes = xAttributes = {}
        xAttributes[attrTag] = ModelAttribute(elt, attrTag, xValid, xValue, sValue, value)
    else:
        elt.xValid = xValid
        elt.xValue = xValue
        elt.sValue = sValue

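The decimal branch above guards Decimal construction with a lexical pattern and then applies XSD facets to the constructed value. A condensed sketch of the same facet logic, assuming facets is a plain dict rather than Arelle's facet objects:

from decimal import Decimal, InvalidOperation

def check_decimal_facets(value, facets):
    # `facets` is assumed to be a plain dict of facet name -> limit.
    try:
        xValue = Decimal(value)
    except InvalidOperation:
        raise ValueError("lexical pattern mismatch")
    if "totalDigits" in facets and len(value.replace(".", "").lstrip("+-")) > facets["totalDigits"]:
        raise ValueError("totalDigits facet {0}".format(facets["totalDigits"]))
    if "fractionDigits" in facets and "." in value and \
            len(value.split(".", 1)[1]) > facets["fractionDigits"]:
        raise ValueError("fractionDigits facet {0}".format(facets["fractionDigits"]))
    if "maxInclusive" in facets and xValue > facets["maxInclusive"]:
        raise ValueError(" > maxInclusive {0}".format(facets["maxInclusive"]))
    return xValue

print(check_decimal_facets("12.34", {"totalDigits": 4, "fractionDigits": 2}))  # 12.34
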
Example 138

Project: zamboni Source File: webpay.py
@csrf_exempt
@use_master
@require_POST
def postback(request):
    """Verify signature and set contribution to paid."""
    signed_jwt = request.POST.get('notice', '')
    try:
        data = parse_from_webpay(signed_jwt, request.META.get('REMOTE_ADDR'))
    except InvalidSender, exc:
        app_pay_cef.log(request, 'Unknown app', 'invalid_postback',
                        'Ignoring invalid JWT %r: %s' % (signed_jwt, exc),
                        severity=4)
        return http.HttpResponseBadRequest()

    pd = urlparse.parse_qs(data['request']['productData'])
    contrib_uuid = pd['contrib_uuid'][0]
    try:
        contrib = Contribution.objects.get(uuid=contrib_uuid)
    except Contribution.DoesNotExist:
        etype, val, tb = sys.exc_info()
        raise LookupError('JWT (iss:%s, aud:%s) for trans_id %s '
                          'links to contrib %s which doesn\'t exist'
                          % (data['iss'], data['aud'],
                             data['response']['transactionID'],
                             contrib_uuid)), None, tb

    trans_id = data['response']['transactionID']

    if contrib.is_inapp_simulation():
        return simulated_postback(contrib, trans_id)

    if contrib.transaction_id is not None:
        if contrib.transaction_id == trans_id:
            app_pay_cef.log(request, 'Repeat postback', 'repeat_postback',
                            'Postback sent again for: %s' % (contrib.addon.pk),
                            severity=4)
            return http.HttpResponse(trans_id)
        else:
            app_pay_cef.log(request, 'Repeat postback with new trans_id',
                            'repeat_postback_new_trans_id',
                            'Postback sent again for: %s, but with new '
                            'trans_id: %s' % (contrib.addon.pk, trans_id),
                            severity=7)
            raise LookupError(
                'JWT (iss:{iss}, aud:{aud}) for trans_id {jwt_trans} is '
                'for contrib {contrib_uuid} that is already paid and has '
                'a different trans_id: {contrib_trans}'
                .format(iss=data['iss'], aud=data['aud'],
                        jwt_trans=data['response']['transactionID'],
                        contrib_uuid=contrib_uuid,
                        contrib_trans=contrib.transaction_id))

    # Special-case free in-app products.
    if data.get('request', {}).get('pricePoint') == '0':
        solitude_buyer_uuid = data['response']['solitude_buyer_uuid']

        try:
            buyer = (solitude.api.generic
                                 .buyer
                                 .get_object_or_404)(uuid=solitude_buyer_uuid)
        except ObjectDoesNotExist:
            raise LookupError(
                'Unable to look up buyer: {uuid} in Solitude'
                .format(uuid=solitude_buyer_uuid))
        user_profile = _get_user_profile(request, buyer.get('email'))
        return free_postback(request, contrib, trans_id, user_profile)
    try:
        transaction_data = (solitude.api.generic
                                        .transaction
                                        .get_object_or_404)(uuid=trans_id)
    except ObjectDoesNotExist:
        raise LookupError(
            'Unable to look up transaction: {trans_id} in Solitude'
            .format(trans_id=trans_id))

    buyer_uri = transaction_data['buyer']

    try:
        buyer_data = solitude.api.by_url(buyer_uri).get_object_or_404()
    except ObjectDoesNotExist:
        raise LookupError(
            'Unable to look up buyer: {buyer_uri} in Solitude'
            .format(buyer_uri=buyer_uri))

    buyer_email = buyer_data['email']

    user_profile = _get_user_profile(request, buyer_email)

    log.info(u'webpay postback: fulfilling purchase for contrib {c} with '
             u'transaction {t}'.format(c=contrib, t=trans_id))
    app_pay_cef.log(request, 'Purchase complete', 'purchase_complete',
                    'Purchase complete for: %s' % (contrib.addon.pk),
                    severity=3)

    contrib.update(transaction_id=trans_id,
                   type=mkt.CONTRIB_PURCHASE,
                   user=user_profile,
                   amount=Decimal(data['response']['price']['amount']),
                   currency=data['response']['price']['currency'])

    tasks.send_purchase_receipt.delay(contrib.pk)

    return http.HttpResponse(trans_id)

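Note that the postback passes the JWT's string amount straight to Decimal. A small demonstration of why the string, and not a float, should reach the constructor; the payload dict here is hypothetical:

from decimal import Decimal

price = {"amount": "0.99", "currency": "USD"}   # amounts arrive as strings

exact = Decimal(price["amount"])           # Decimal('0.99'), exact
drifted = Decimal(float(price["amount"]))  # Decimal('0.98999999999999999...'): float noise

assert exact == Decimal("0.99")
assert drifted != exact
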
Example 139

Project: django-postal Source File: resource.py
    def construct(self):
        """
        Recursively serialize a lot of types, and
        in cases where it doesn't recognize the type,
        it will fall back to Django's `smart_unicode`.

        Returns `dict`.
        """
        def _any(thing, fields=None):
            """
            Dispatch, all types are routed through here.
            """
            ret = None

            if isinstance(thing, QuerySet):
                ret = _qs(thing, fields)
            elif isinstance(thing, (tuple, list, set)):
                ret = _list(thing, fields)
            elif isinstance(thing, dict):
                ret = _dict(thing, fields)
            elif isinstance(thing, decimal.Decimal):
                ret = str(thing)
            elif isinstance(thing, Model):
                ret = _model(thing, fields)
            elif isinstance(thing, HttpResponse):
                raise HttpStatusCode(thing)
            elif inspect.isfunction(thing):
                if not inspect.getargspec(thing)[0]:
                    ret = _any(thing())
            elif hasattr(thing, '__emittable__'):
                f = thing.__emittable__
                if inspect.ismethod(f) and len(inspect.getargspec(f)[0]) == 1:
                    ret = _any(f())
            elif repr(thing).startswith("<django.db.models.fields.related.RelatedManager"):
                ret = _any(thing.all())
            else:
                ret = smart_unicode(thing, strings_only=True)

            return ret

        def _fk(data, field):
            """
            Foreign keys.
            """
            return _any(getattr(data, field.name))

        def _related(data, fields=None):
            """
            Foreign keys.
            """
            return [ _model(m, fields) for m in data.iterator() ]

        def _m2m(data, field, fields=None):
            """
            Many to many (re-route to `_model`.)
            """
            return [ _model(m, fields) for m in getattr(data, field.name).iterator() ]

        def _model(data, fields=None):
            """
            Models. Will respect the `fields` and/or
            `exclude` on the handler (see `typemapper`.)
            """
            ret = { }
            handler = self.in_typemapper(type(data), self.anonymous)
            get_absolute_uri = False

            if handler or fields:
                v = lambda f: getattr(data, f.attname)

                if handler:
                    fields = getattr(handler, 'fields')

                if not fields or hasattr(handler, 'fields'):
                    """
                    Fields were not specified; try to find the correct
                    version in the typemapper we were sent.
                    """
                    mapped = self.in_typemapper(type(data), self.anonymous)
                    get_fields = set(mapped.fields)
                    exclude_fields = set(mapped.exclude).difference(get_fields)

                    if 'absolute_uri' in get_fields:
                        get_absolute_uri = True

                    if not get_fields:
                        get_fields = set([ f.attname.replace("_id", "", 1)
                            for f in data._meta.fields + data._meta.virtual_fields])

                    if hasattr(mapped, 'extra_fields'):
                        get_fields.update(mapped.extra_fields)

                    # sets can be negated.
                    for exclude in exclude_fields:
                        if isinstance(exclude, basestring):
                            get_fields.discard(exclude)

                        elif isinstance(exclude, re._pattern_type):
                            for field in get_fields.copy():
                                if exclude.match(field):
                                    get_fields.discard(field)

                else:
                    get_fields = set(fields)

                met_fields = self.method_fields(handler, get_fields)

                for f in data._meta.local_fields + data._meta.virtual_fields:
                    if f.serialize and not any([ p in met_fields for p in [ f.attname, f.name ]]):
                        if not f.rel:
                            if f.attname in get_fields:
                                ret[f.attname] = _any(v(f))
                                get_fields.remove(f.attname)
                        else:
                            if f.attname[:-3] in get_fields:
                                ret[f.name] = _fk(data, f)
                                get_fields.remove(f.name)

                for mf in data._meta.many_to_many:
                    if mf.serialize and mf.attname not in met_fields:
                        if mf.attname in get_fields:
                            ret[mf.name] = _m2m(data, mf)
                            get_fields.remove(mf.name)

                # try to get the remainder of fields
                for maybe_field in get_fields:
                    if isinstance(maybe_field, (list, tuple)):
                        model, fields = maybe_field
                        inst = getattr(data, model, None)

                        if inst:
                            if hasattr(inst, 'all'):
                                ret[model] = _related(inst, fields)
                            elif callable(inst):
                                if len(inspect.getargspec(inst)[0]) == 1:
                                    ret[model] = _any(inst(), fields)
                            else:
                                ret[model] = _model(inst, fields)

                    elif maybe_field in met_fields:
                        # Overriding normal field which has a "resource method"
                        # so you can alter the contents of certain fields without
                        # using different names.
                        ret[maybe_field] = _any(met_fields[maybe_field](data))

                    else:
                        maybe = getattr(data, maybe_field, None)
                        if maybe is not None:
                            if callable(maybe):
                                if len(inspect.getargspec(maybe)[0]) <= 1:
                                    ret[maybe_field] = _any(maybe())
                            else:
                                ret[maybe_field] = _any(maybe)
                        else:
                            handler_f = getattr(handler or self.handler, maybe_field, None)

                            if handler_f:
                                ret[maybe_field] = _any(handler_f(data))

            else:
                for f in data._meta.fields:
                    ret[f.attname] = _any(getattr(data, f.attname))

                fields = dir(data.__class__) + ret.keys()
                add_ons = [k for k in dir(data) if k not in fields]

                for k in add_ons:
                    ret[k] = _any(getattr(data, k))

            # resource uri
            if self.in_typemapper(type(data), self.anonymous):
                handler = self.in_typemapper(type(data), self.anonymous)
                if hasattr(handler, 'resource_uri'):
                    url_id, fields = handler.resource_uri(data)

                    try:
                        ret['resource_uri'] = permalink(lambda: (url_id, fields))()
                    except NoReverseMatch, e:
                        pass

            if hasattr(data, 'get_api_url') and 'resource_uri' not in ret:
                try:
                    ret['resource_uri'] = data.get_api_url()
                except:
                    pass

            # absolute uri
            if hasattr(data, 'get_absolute_url') and get_absolute_uri:
                try:
                    ret['absolute_uri'] = data.get_absolute_url()
                except:
                    pass

            return ret

        def _qs(data, fields=None):
            """
            Querysets.
            """
            return [_any(v, fields) for v in data ]

        def _list(data, fields=None):
            """
            Lists.
            """
            return [_any(v, fields) for v in data ]

        def _dict(data, fields=None):
            """
            Dictionaries.
            """
            return dict([(k, _any(v, fields)) for k, v in data.iteritems()])

        # Kickstart the serializin'.
        return _any(self.data, self.fields)

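The emitter above stringifies Decimal before anything reaches the wire format, since JSON has no exact decimal type. A stripped-down sketch of that dispatch; to_jsonable is an illustrative name, not part of django-postal:

import decimal
import json

def to_jsonable(thing):
    # Decimal becomes a string so no precision is lost on the wire;
    # json.dumps would otherwise raise TypeError on a raw Decimal.
    if isinstance(thing, decimal.Decimal):
        return str(thing)
    if isinstance(thing, dict):
        return dict((k, to_jsonable(v)) for k, v in thing.items())
    if isinstance(thing, (list, tuple, set)):
        return [to_jsonable(v) for v in thing]
    return thing

print(json.dumps(to_jsonable({"total": decimal.Decimal("19.99")})))  # {"total": "19.99"}
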
Example 140

Project: multichain-explorer Source File: SqlAbstraction.py
Function: set_flavour
    def _set_flavour(sql):
        def identity(x):
            return x
        transform = identity
        transform_stmt = sql._transform_stmt
        selectall = sql._selectall

        if sql.module.paramstyle in ('format', 'pyformat'):
            transform_stmt = sql._qmark_to_format(transform_stmt)
        elif sql.module.paramstyle == 'named':
            transform_stmt = sql._qmark_to_named(transform_stmt)
        elif sql.module.paramstyle != 'qmark':
            sql.log.warning("Database parameter style is "
                            "%s, trying qmark", sql.module.paramstyle)
            pass

        # Binary I/O with the database.
        # Reversed versions exist for Bitcoin hashes; since the
        # protocol treats them as 256-bit integers and represents them
        # as little endian, we have to reverse them in hex to satisfy
        # human expectations.
        def rev(x):
            return None if x is None else x[::-1]
        def to_hex(x):
            return None if x is None else str(x).encode('hex')
        def from_hex(x):
            return None if x is None else x.decode('hex')
        def to_hex_rev(x):
            return None if x is None else str(x)[::-1].encode('hex')
        def from_hex_rev(x):
            return None if x is None else x.decode('hex')[::-1]

        val = sql.config.get('binary_type')

        if val in (None, 'str', "binary"):
            binin       = identity
            binin_hex   = from_hex
            binout      = identity
            binout_hex  = to_hex
            revin       = rev
            revin_hex   = from_hex
            revout      = rev
            revout_hex  = to_hex

        elif val in ("buffer", "bytearray", "pg-bytea"):
            if val == "bytearray":
                def to_btype(x):
                    return None if x is None else bytearray(x)
            else:
                def to_btype(x):
                    return None if x is None else buffer(x)

            def to_str(x):
                return None if x is None else str(x)

            binin       = to_btype
            binin_hex   = lambda x: to_btype(from_hex(x))
            binout      = to_str
            binout_hex  = to_hex
            revin       = lambda x: to_btype(rev(x))
            revin_hex   = lambda x: to_btype(from_hex(x))
            revout      = rev
            revout_hex  = to_hex

            if val == "pg-bytea":
                transform_stmt = sql._binary_as_bytea(transform_stmt)

        elif val == "hex":
            transform = sql._binary_as_hex(transform)
            binin       = to_hex
            binin_hex   = identity
            binout      = from_hex
            binout_hex  = identity
            revin       = to_hex_rev
            revin_hex   = identity
            revout      = from_hex_rev
            revout_hex  = identity

        else:
            raise Exception("Unsupported binary-type %s" % (val,))

        val = sql.config.get('int_type')
        if val in (None, 'int'):
            intin = identity

        elif val == 'decimal':
            import decimal
            def _intin(x):
                return None if x is None else decimal.Decimal(x)
            intin = _intin

        elif val == 'str':
            def _intin(x):
                return None if x is None else str(x)
            intin = _intin
            # Work around sqlite3's integer overflow.
            transform = sql._approximate(transform)

        else:
            raise Exception("Unsupported int-type %s" % (val,))

        val = sql.config.get('sequence_type')
        if val in (None, 'update'):
            new_id = lambda key: sql._new_id_update(key)
            create_sequence = lambda key: sql._create_sequence_update(key)
            drop_sequence = lambda key: sql._drop_sequence_update(key)

        else:
            raise Exception("Unsupported sequence-type %s" % (val,))

        val = sql.config.get('limit_style')
        if val in (None, 'native'):
            pass
        elif val == 'emulated':
            selectall = sql.emulate_limit(selectall)

        val = sql.config.get('concat_style')
        if val in (None, 'ansi'):
            pass

        transform_stmt = sql._append_table_epilogue(transform_stmt)

        transform = sql._fallback_to_lob(transform)
        transform = sql._fallback_to_approximate(transform)

        sql.transform_chunk = transform
        sql.transform_stmt = transform_stmt
        sql.selectall = selectall
        sql._cache = {}

        sql.binin       = binin
        sql.binin_hex   = binin_hex
        sql.binout      = binout
        sql.binout_hex  = binout_hex
        sql.revin       = revin
        sql.revin_hex   = revin_hex
        sql.revout      = revout
        sql.revout_hex  = revout_hex

        # Might reimplement these someday...
        def binout_int(x):
            if x is None:
                return None
            return int(binout_hex(x), 16)
        def binin_int(x, bits):
            if x is None:
                return None
            return binin_hex(("%%0%dx" % (bits / 4)) % x)
        sql.binout_int  = binout_int
        sql.binin_int   = binin_int

        sql.intin       = intin
        sql.new_id      = new_id
        sql.create_sequence = create_sequence
        sql.drop_sequence = drop_sequence

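The int_type branch above selects a converter (intin) so that back ends whose drivers overflow on large integers receive decimal.Decimal or str instead of raw ints. A compact sketch of that adapter selection; make_intin is an illustrative wrapper, not part of the project:

import decimal

def make_intin(int_type):
    # Returns the converter applied to integers before they are bound.
    if int_type in (None, 'int'):
        return lambda x: x
    if int_type == 'decimal':
        return lambda x: None if x is None else decimal.Decimal(x)
    if int_type == 'str':
        return lambda x: None if x is None else str(x)
    raise Exception("Unsupported int-type %s" % (int_type,))

intin = make_intin('decimal')
print(intin(2 ** 63))   # 9223372036854775808, past sqlite3's signed 64-bit limit
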
Example 141

Project: vosae-app Source File: test_runner.py
    def run_suite(self, suite, **kwargs):
        from django.contrib.auth import get_user_model

        def set_global_initial_data():
            from invoicing.models import Currency, ExchangeRate

            # Add Vosae supported currencies
            currencies_symbols = dict(settings.VOSAE_SUPPORTED_CURRENCIES).keys()
            exchange_rate_dt = datetime.datetime.now()
            for symbol in currencies_symbols:
                currency = Currency(symbol=symbol)
                for to_symbol in currencies_symbols:
                    if to_symbol == symbol:
                        continue
                    currency.rates.append(ExchangeRate(currency_to=to_symbol, datetime=exchange_rate_dt, rate=Decimal('1.00')))
                currency.save(upsert=True)

        def create_tenant(tenant_name, attached_users=[]):
            from django.contrib.auth.models import Group
            from core.models import Tenant, FRRegistrationInfo
            from contacts.models import Address
            from invoicing.models import Currency

            tenant = Tenant(
                name=tenant_name,
                email='[email protected]',
                postal_address=Address(street_address='Street address'),
                billing_address=Address(street_address='Street address'),
                registration_info=FRRegistrationInfo(
                    share_capital='100 000 EUR',
                    siret='123 456 789 012 00001',
                    rcs_number='PARIS 005',
                    vat_number='FR01234567890'
                )
            )

            # Tenant settings
            tenant.tenant_settings.invoicing.supported_currencies = [
                Currency.objects.get(symbol='EUR'),
                Currency.objects.get(symbol='USD')
            ]
            tenant.tenant_settings.invoicing.default_currency = Currency.objects.get(symbol='EUR')
            tenant.save()

            group = Group.objects.get(name=tenant.slug)
            for attached_user in attached_users:
                group.user_set.add(attached_user)
            group.save()
            return tenant

        def set_tenant_initial_data(tenant_slug, user_email):
            from core.models import Tenant, VosaeUser, VosaeGroup
            from invoicing.models import Tax

            tenant = Tenant.objects.get(slug=tenant_slug)
            # Vosae user
            vosae_user = VosaeUser(
                tenant=tenant,
                email=user_email,
                groups=[VosaeGroup.objects.get(tenant=tenant, is_admin=True)]
            ).save()

            # Taxes
            Tax(tenant=tenant, name=u'Exempt', rate=Decimal('0.00')).save()
            Tax(tenant=tenant, name=u'TVA', rate=Decimal('0.055')).save()
            Tax(tenant=tenant, name=u'TVA', rate=Decimal('0.07')).save()
            Tax(tenant=tenant, name=u'TVA', rate=Decimal('0.196')).save()
            return vosae_user

        UserModel = get_user_model()
        if 'VOSAE_EXPORT_TESTS_RESULTS' not in os.environ:
            print 'Tests \033[1mwill not\033[0m generate documentation data'
        else:
            confirm = raw_input('Tests \033[1mwill\033[0m generate documentation data. Proceed? [Y/n]: ')
            while True:
                if confirm.lower() not in ('y', '', 'n'):
                    confirm = raw_input('Please enter either "y", "n" or leave blank for default (y): ')
                    continue
                if confirm.lower() == 'n':
                    print 'Aborting...'
                    return DummyTestResult()
                break
        print 'Initializing test environment...'
        django_user = UserModel.objects.create_user('[email protected]', 'password', active=True, send_email=False)
        set_global_initial_data()
        settings.TENANT = create_tenant('Test Company', [django_user])
        settings.VOSAE_USER = set_tenant_initial_data('test-company', '[email protected]')

        # Should reload to get fields updated by post init tasks
        settings.TENANT.reload()
        settings.VOSAE_USER.reload()

        # Other settings overrides
        settings.STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'

        # DEBUG is false by default with DjangoTestSuiteRunner
        # Cf. http://python.6.n6.nabble.com/Why-does-django-s-default-test-suite-runner-set-settings-DEBUG-False-td297023.html#a297025
        # But in some cases, for API tests, it is useful to set to True.
        # settings.DEBUG = True
        return super(VosaeTestRunner, self).run_suite(suite, **kwargs)

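The fixture above seeds exchange and tax rates from Decimal string literals (Decimal('0.055'), never the float 0.055), which keeps the advertised rate exact. A short illustration of why that matters once the rate is applied to an amount:

from decimal import Decimal

rate = Decimal('0.055')    # exact; Decimal(0.055) would embed binary float error
amount = Decimal('200.00')

tax = (amount * rate).quantize(Decimal('0.01'))  # round to cents
print(tax)   # 11.00
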
Example 142

Project: dreampie Source File: automation.py
    def _set_value(self, value):
        _VariantClear(self)
        if value is None:
            self.vt = VT_NULL
        # since bool is a subclass of int, this check must come before
        # the check for int
        elif isinstance(value, bool):
            self.vt = VT_BOOL
            self._.VT_BOOL = value
        elif isinstance(value, (int, c_int)):
            self.vt = VT_I4
            self._.VT_I4 = value
        elif isinstance(value, long):
            u = self._
            # try VT_I4 first.
            u.VT_I4 = value
            if u.VT_I4 == value:
                # it did work.
                self.vt = VT_I4
                return
            # try VT_UI4 next.
            if value >= 0:
                u.VT_UI4 = value
                if u.VT_UI4 == value:
                    # did work.
                    self.vt = VT_UI4
                    return
            # try VT_I8 next.
            if value >= 0:
                u.VT_I8 = value
                if u.VT_I8 == value:
                    # did work.
                    self.vt = VT_I8
                    return
            # try VT_UI8 next.
            if value >= 0:
                u.VT_UI8 = value
                if u.VT_UI8 == value:
                    # did work.
                    self.vt = VT_UI8
                    return
            # VT_R8 is last resort.
            self.vt = VT_R8
            u.VT_R8 = float(value)
        elif isinstance(value, (float, c_double)):
            self.vt = VT_R8
            self._.VT_R8 = value
        elif isinstance(value, (str, unicode)):
            self.vt = VT_BSTR
            # do the c_wchar_p auto unicode conversion
            self._.c_void_p = _SysAllocStringLen(value, len(value))
        elif isinstance(value, datetime.datetime):
            delta = value - _com_null_date
            # a day has 24 * 60 * 60 = 86400 seconds
            com_days = delta.days + (delta.seconds + delta.microseconds * 1e-6) / 86400.
            self.vt = VT_DATE
            self._.VT_R8 = com_days
        elif decimal is not None and isinstance(value, decimal.Decimal):
            self._.VT_CY = int(round(value * 10000))
            self.vt = VT_CY
        elif isinstance(value, POINTER(IDispatch)):
            CopyComPointer(value, byref(self._))
            self.vt = VT_DISPATCH
        elif isinstance(value, POINTER(IUnknown)):
            CopyComPointer(value, byref(self._))
            self.vt = VT_UNKNOWN
        elif isinstance(value, (list, tuple)):
            obj = _midlSAFEARRAY(VARIANT).create(value)
            memmove(byref(self._), byref(obj), sizeof(obj))
            self.vt = VT_ARRAY | obj._vartype_
        elif isinstance(value, array.array):
            vartype = _arraycode_to_vartype[value.typecode]
            typ = _vartype_to_ctype[vartype]
            obj = _midlSAFEARRAY(typ).create(value)
            memmove(byref(self._), byref(obj), sizeof(obj))
            self.vt = VT_ARRAY | obj._vartype_
        elif isinstance(value, Structure) and hasattr(value, "_recordinfo_"):
            guids = value._recordinfo_
            from comtypes.typeinfo import GetRecordInfoFromGuids
            ri = GetRecordInfoFromGuids(*guids)
            self.vt = VT_RECORD
            # Assigning a COM pointer to a structure field does NOT
            # call AddRef(), have to call it manually:
            ri.AddRef()
            self._.pRecInfo = ri
            self._.pvRecord = ri.RecordCreateCopy(byref(value))
        elif isinstance(getattr(value, "_comobj", None), POINTER(IDispatch)):
            CopyComPointer(value._comobj, byref(self._))
            self.vt = VT_DISPATCH
        elif isinstance(value, VARIANT):
            _VariantCopy(self, value)
        elif isinstance(value, c_ubyte):
            self._.VT_UI1 = value
            self.vt = VT_UI1
        elif isinstance(value, c_char):
            self._.VT_UI1 = ord(value.value)
            self.vt = VT_UI1
        elif isinstance(value, c_byte):
            self._.VT_I1 = value
            self.vt = VT_I1
        elif isinstance(value, c_ushort):
            self._.VT_UI2 = value
            self.vt = VT_UI2
        elif isinstance(value, c_short):
            self._.VT_I2 = value
            self.vt = VT_I2
        elif isinstance(value, c_uint):
            self.vt = VT_UI4
            self._.VT_UI4 = value
        elif isinstance(value, c_float):
            self.vt = VT_R4
            self._.VT_R4 = value
        else:
            raise TypeError("Cannot put %r in VARIANT" % value)

Example 143

Project: dreampie Source File: automation.py
    def _get_value(self, dynamic=False):
        vt = self.vt
        if vt in (VT_EMPTY, VT_NULL):
            return None
        elif vt == VT_I1:
            return self._.VT_I1
        elif vt == VT_I2:
            return self._.VT_I2
        elif vt == VT_I4:
            return self._.VT_I4
        elif vt == VT_I8:
            return self._.VT_I8
        elif vt == VT_UI8:
            return self._.VT_UI8
        elif vt == VT_INT:
            return self._.VT_INT
        elif vt == VT_UI1:
            return self._.VT_UI1
        elif vt == VT_UI2:
            return self._.VT_UI2
        elif vt == VT_UI4:
            return self._.VT_UI4
        elif vt == VT_UINT:
            return self._.VT_UINT
        elif vt == VT_R4:
            return self._.VT_R4
        elif vt == VT_R8:
            return self._.VT_R8
        elif vt == VT_BOOL:
            return self._.VT_BOOL
        elif vt == VT_BSTR:
            return self._.bstrVal
        elif vt == VT_DATE:
            days = self._.VT_R8
            return datetime.timedelta(days=days) + _com_null_date
        elif vt == VT_CY:
            if decimal is not None:
                return self._.VT_CY / decimal.Decimal("10000")
            else:
                return self._.VT_CY / 10000.
        elif vt == VT_UNKNOWN:
            val = self._.c_void_p
            if not val:
                # We should/could return a NULL COM pointer.
                # But the code generation must be able to construct one
                # from the __repr__ of it.
                return None # XXX?
            ptr = cast(val, POINTER(IUnknown))
            # cast doesn't call AddRef (it should, imo!)
            ptr.AddRef()
            return ptr.__ctypes_from_outparam__()
        elif vt == VT_DISPATCH:
            val = self._.c_void_p
            if not val:
                # See above.
                return None # XXX?
            ptr = cast(val, POINTER(IDispatch))
            # cast doesn't call AddRef (it should, imo!)
            ptr.AddRef()
            if not dynamic:
                return ptr.__ctypes_from_outparam__()
            else:
                from comtypes.client.dynamic import Dispatch
                return Dispatch(ptr)
        # see also c:/sf/pywin32/com/win32com/src/oleargs.cpp
        elif self.vt & VT_BYREF:
            return self
        elif vt == VT_RECORD:
            from comtypes.client import GetModule
            from comtypes.typeinfo import IRecordInfo

            # Retrieving a COM pointer from a structure field does NOT
            # call AddRef(), have to call it manually:
            punk = self._.pRecInfo
            punk.AddRef()
            ri = punk.QueryInterface(IRecordInfo)

            # find typelib
            tlib = ri.GetTypeInfo().GetContainingTypeLib()[0]

            # load typelib wrapper module
            mod = GetModule(tlib)
            # retrieve the type and create an instance
            value = getattr(mod, ri.GetName())()
            # copy data into the instance
            ri.RecordCopy(self._.pvRecord, byref(value))

            return value
        elif self.vt & VT_ARRAY:
            typ = _vartype_to_ctype[self.vt & ~VT_ARRAY]
            return cast(self._.pparray, _midlSAFEARRAY(typ)).unpack()
        else:
            raise NotImplementedError("typecode %d = 0x%x" % (vt, vt))
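
Reading VT_CY back out, the code divides by Decimal("10000") when the decimal module is available and only falls back to float division otherwise. The Decimal path keeps the value exact; a quick illustration:

from decimal import Decimal

cy = 123456                     # CURRENCY payload for 12.3456

exact = cy / Decimal("10000")   # Decimal('12.3456'), exact
lossy = cy / 10000.0            # a binary float, already rounded

assert exact == Decimal("12.3456")
assert Decimal(lossy) != Decimal("12.3456")   # the float carries rounding error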

Example 144

Project: onepercentclub-site Source File: 0007_vouchers.py
    def forwards(self, orm):
        db.send_create_signal('payments_voucher', ['voucherpayment'])

        total = orm['vouchers.Voucher'].objects.count()
        t = 1
        content_types = orm['contenttypes.ContentType'].objects
        voucher_payment_type_id = content_types.get(app_label='payments_voucher', model='voucherpayment').id

        for old_voucher in orm['vouchers.Voucher'].objects.all():
            print "Migrating voucher {0}/{1}".format(t, total)
            t += 1
            amount = Decimal(old_voucher.amount) / 100
            voucher = orm['payments_voucher.Voucher'].objects.create(
                code=old_voucher.code,
                message=old_voucher.message,
                status=old_voucher.status,
                created=old_voucher.created,
                updated=old_voucher.updated,
                sender_id=old_voucher.sender_id,
                sender_email=old_voucher.sender_email,
                sender_name=old_voucher.sender_name,
                receiver_id=old_voucher.receiver_id,
                receiver_email=old_voucher.receiver_email,
                receiver_name=old_voucher.receiver_name,
                order_id=old_voucher.order_id,
                amount=amount
            )
            voucher.save()

            for old_donation in orm['fund.Donation'].objects.filter(voucher=old_voucher).all():
                print "Migrating voucher donation...."

                # For every donation we should create an Order, OrderPayment and VoucherPayment.
                amount = Decimal(old_donation.amount) / 100

                # Create an order
                order = orm['orders.Order'].objects.create(
                    created=old_donation.created,
                    confirmed=old_donation.ready,
                    completed=old_donation.ready,
                    updated=old_donation.ready,
                    user=old_donation.user,
                    total=amount,
                    order_type='voucher',
                    status=StatusDefinition.SUCCESS
                )
                order.save()

                # Create a new Donation
                donation, created = orm['donations.Donation'].objects.get_or_create(
                    id=old_donation.id,
                    project=old_donation.project,
                    amount=amount
                )
                donation.created = old_donation.created
                donation.updated = old_donation.ready
                donation.order = order
                donation.save()

                # Create an order payment
                order_payment = orm['payments.OrderPayment'].objects.create(
                    created=old_donation.created,
                    updated=old_donation.ready,
                    order=order,
                    closed=old_donation.ready,
                    amount=amount,
                    transaction_fee=Decimal(0),
                    payment_method='voucherVoucher',
                    status=StatusDefinition.SETTLED
                )
                order_payment.save()

                # Create a voucher payment
                voucher_payment = orm['payments_voucher.VoucherPayment'].objects.create(
                    order_payment=order_payment,
                    voucher=voucher,
                    created=old_donation.created,
                    updated=old_donation.ready,
                    status=StatusDefinition.SETTLED,
                    polymorphic_ctype_id=voucher_payment_type_id
                )
                voucher_payment.save()

        # Manually reset the sequences for the donations that got migrated IDs.
        reset_db_sequence('donations_donation')
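
Both conversions above turn legacy amounts stored as integer cents into Decimal amounts via Decimal(old.amount) / 100; with a Decimal numerator the division is exact decimal arithmetic, not float division. A minimal sketch of the idiom (the quantize step is an addition here, not in the migration):

from decimal import Decimal

def cents_to_amount(cents):
    # int -> Decimal division is exact: 1995 cents -> Decimal('19.95')
    return (Decimal(cents) / 100).quantize(Decimal("0.01"))

assert cents_to_amount(1995) == Decimal("19.95")
assert cents_to_amount(5) == Decimal("0.05")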

Example 145

Project: onepercentclub-site Source File: process_monthly_donations.py
def prepare_monthly_donations():
    """
    Prepare MonthlyOrders.
    """

    ten_days_ago = timezone.now() + timezone.timedelta(days=-10)
    recent_batches = MonthlyBatch.objects.filter(date__gt=ten_days_ago)
    if recent_batches.count() > 0:
        recent_batch = recent_batches.all()[0]
        message = "Found a recent batch {0} : {1}. Refusing to create another one quite now.".format(recent_batch.id, recent_batch)
        logger.error(message)
        return

    batch = MonthlyBatch.objects.create(date=now())
    batch.save()
    top_three_donation = False

    donor_queryset = MonthlyDonor.objects.filter(active=True).order_by('user__email')

    recurring_donation_errors = []
    RecurringDonationError = namedtuple('RecurringDonationError', 'recurring_payment error_message')
    skipped_recurring_payments = []
    SkippedRecurringPayment = namedtuple('SkippedRecurringPayment', 'recurring_payment orders')
    donation_count = 0

    popular_projects_all = PROJECT_MODEL.objects.exclude(skip_monthly=True, amount_needed=0).filter(status=ProjectPhase.objects.get(slug="campaign")).order_by('-popularity')
    top_three_projects = list(popular_projects_all[:3])
    top_projects = list(popular_projects_all[3:])

    logger.info("Config: Using these projects as 'Top Three':")
    for project in top_three_projects:
        logger.info("  {0}".format(project.title.encode("utf8")))

    # The main loop that processes each monthly donation.
    for donor in donor_queryset:

        # Remove DonorProjects for Projects that no longer need money.
        # This is amount_needed from db minus the amount already appointed in previous MonthlyDonations
        for donor_project in donor.projects.all():
            if donor_project.project.status != ProjectPhase.objects.get(slug="campaign"):
                logger.info(u"Project not in Campaign phase. Skipping '{0}'".format(donor_project.project.title))
                donor_project.delete()
            elif donor_project.project.amount_needed <= 0:
                logger.info(u"Project already funded. Skipping '{0}'".format(donor_project.project.title))
                donor_project.delete()
            else:
                monthly_project, created = MonthlyProject.objects.get_or_create(batch=batch, project=donor_project.project)
                if donor_project.project.amount_needed - monthly_project.amount <= 0:
                    logger.info(u"Project already funded. Skipping '{0}'".format(donor_project.project.title))
                    donor_project.delete()

        # Remove Projects from top 3
        for project in list(top_three_projects):  # iterate over a copy; the loop mutates the list
            monthly_project, created = MonthlyProject.objects.get_or_create(batch=batch, project=project)
            if project.amount_needed - monthly_project.amount <= 0:
                # Remove the project if it doesn't need more money and add another from top_projects
                logger.info(u"Top3 project fully funded. Skipping '{0}'".format(project.title))
                top_three_projects.remove(project)
                new_project = top_projects.pop(0)
                logger.info(u"New Top3 project added '{0}'".format(new_project.title))
                top_three_projects.append(new_project)

        # Check if the donor object is valid
        if not donor.is_valid:
            error_message = "MonthlyDonor [{0}] invalid! IBAN/BIC missing or amount wrong.".format(donor.id)
            logger.error(error_message)
            recurring_donation_errors.append(RecurringDonationError(donor, error_message))
            continue

        # Create MonthlyOrder and MonthlyDonation objects
        if donor.projects.count():
            # Prepare a MonthlyOrder with preferred projects
            preferred_projects = []
            for project in donor.projects.all():
                preferred_projects.append(project.project)
            recurring_order = create_recurring_order(donor.user, preferred_projects, batch, donor)
            logger.debug("Preparing an Order with preferred projects for user: {0}.".format(donor.user))
        else:
            # Prepare MonthlyOrder with Donations for the top three projects.
            logger.debug("Preparing new 'Top Three' Order for user {0}.".format(donor.user))
            recurring_order = create_recurring_order(donor.user, top_three_projects, batch, donor)
            top_three_donation = True

        # Update amounts for projects
        for donation in recurring_order.donations.all():
            monthly_project, created = MonthlyProject.objects.get_or_create(batch=batch, project=donation.project)
            monthly_project.amount += donation.amount
            monthly_project.save()

        # At this point the order should be correctly setup and ready for the DocData payment.
        if top_three_donation:
            donation_type_message = "supporting the 'Top Three' projects"
        else:
            donation_type_message = "with {0} donations".format(recurring_order.donations.count())
        logger.info("Starting payment for '{0}' {1}.".format(donor, donation_type_message))

        # Safety check to ensure the modifications to the donations in the recurring result in an Order total that
        # matches the RecurringDirectDebitPayment.
        if donor.amount != Decimal(recurring_order.amount):
            error_message = "Monthly donation amount: {0} does not equal recurring Order amount: {1} for '{2}'. Not processing this recurring donation.".format(
                donor.amount, recurring_order.amount, donor)
            logger.error(error_message)
            recurring_donation_errors.append(RecurringDonationError(donor, error_message))
            continue

    logger.info("")
    logger.info("Recurring Donation Preparing Summary")
    logger.info("=====================================")
    logger.info("")
    logger.info("Total number of recurring donations: {0}".format(donor_queryset.count()))
    logger.info("Number of recurring Orders successfully processed: {0}".format(donation_count))
    logger.info("Number of errors: {0}".format(len(recurring_donation_errors)))
    logger.info("Number of skipped payments: {0}".format(len(skipped_recurring_payments)))

    if len(recurring_donation_errors) > 0:
        logger.info("")
        logger.info("")
        logger.info("Detailed Error List")
        logger.info("===================")
        logger.info("")
        for error in recurring_donation_errors:
            logger.info("RecurringDirectDebitPayment: {0} {1}".format(error.recurring_payment.id, error.recurring_payment))
            logger.info("Error: {0}".format(error.error_message))
            logger.info("--")

    if len(skipped_recurring_payments) > 0:
        logger.info("")
        logger.info("")
        logger.info("Skipped Recurring Payments")
        logger.info("==========================")
        logger.info("")
        for skipped_payment in skipped_recurring_payments:
            logger.info("RecurringDirectDebitPayment: {0} {1}".format(skipped_payment.recurring_payment.id, skipped_payment.recurring_payment))
            for closed_order in skipped_payment.orders:
                logger.info("Order Number: {0}".format(closed_order.id))
                logger.info("--")

Example 146

Project: deprecated-version-OIPA-v2 Source File: import_iati_xml.py
    def _save_activity(self, el):
        # ====================================================================
        # IDENTIFICATION
        # ====================================================================

        # get_or_create >
        # Organisation(models.Model)
        # --------------------------------------------------------------------

        self.currency = el.get('default-currency', None)

        reporting_organisation_name = el['reporting-org']
        reporting_organisation_ref = el['reporting-org'].get('ref')
        reporting_organisation_type = el['reporting-org'].get('type')

        try:
            organisation = Organisation.objects.get(
                                                    ref=reporting_organisation_ref
                                                )

        except Organisation.DoesNotExist:
            if reporting_organisation_type:
                try:
                    reporting_organisation_type = int(reporting_organisation_type)
                    organisation = Organisation.objects.create(
                        ref=reporting_organisation_ref,
                        org_name=reporting_organisation_name,
                        type=reporting_organisation_type
                    )
                except ValueError:
                    # reverse lookup
                    for k, v in ORGANISATION_TYPE_CHOICES:
                        if reporting_organisation_type == v:
                            reporting_organisation_type = k
                    organisation = Organisation.objects.create(
                                                            ref=reporting_organisation_ref,
                                                            org_name=reporting_organisation_name,
                                                            type=reporting_organisation_type
                                                        )
            else:
                organisation = Organisation.objects.create(
                                                        ref=reporting_organisation_ref,
                                                        org_name=reporting_organisation_name,
                                                    )

        # get_or_create >
        # IATIActivity(models.Model)
        # --------------------------------------------------------------------

        iati_identifier = fix_whitespaces(str(el['iati-identifier']))
        iati_identifier = ''.join(iati_identifier.split()) # make sure all whitespace is trimmed
        date_updated = self._parse_date(el.get('last-updated-datetime', str(datetime.now().date())))
        iati_activity, created = IATIActivity.objects.get_or_create(
                                     iati_identifier=iati_identifier,
                                     reporting_organisation=organisation,
                                 )
        iati_activity.date_updated = date_updated

#        if not self.force_update and iati_activity.date_updated >= date_updated:
#            print "WARNING | This record already exists. Use --force-update to override."
#            return

        # ====================================================================
        # BASIC ACTIVITY INFORMATION
        # ====================================================================

        # get_or_create >
        # IATIActivityTitle(models.Model)
        # @todo
        # type
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "=========================="
            print "Running tests..."
            print "=========================="
            print "setting title"
        iati_activity.iatiactivitytitle_set.all().delete()
        iati_activity_title = unicode(el.title).encode('UTF-8')
        iati_activity_title_type = el['title'].get('type')
        iati_activity_title_language = str(el['title'].get('{http://www.w3.org/XML/1998/namespace}lang', 'default')).lower()

        activity_title, created = IATIActivityTitle.objects.get_or_create(
                                      iati_activity=iati_activity,
                                      title=iati_activity_title
                                  )
        if iati_activity_title_language:
            activity_title.language = Language.objects.get_or_create(
                                          code=iati_activity_title_language
                                      )[0]
            activity_title.save()

        # get_or_create >
        # IATIActivityDescription(models.Model)
        # @todo
        # type
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting description"
        iati_activity.iatiactivitydescription_set.all().delete()
        if hasattr(el, 'description'):
            iati_activity_description = unicode(el.description).encode('UTF-8')
            iati_activity_description_type = el['description'].get('type')
            iati_activity_description_language = str(el['description'].get('{http://www.w3.org/XML/1998/namespace}lang', 'default')).lower()

            activity_description, created = IATIActivityDescription.objects.get_or_create(
                                                iati_activity=iati_activity,
                                                description=iati_activity_description
                                            )
            if iati_activity_description_language:
                activity_description.language = Language.objects.get_or_create(
                                                    code=iati_activity_description_language
                                                )[0]
                activity_description.save()

        # get_or_create >
        # ActivityStatusType(models.Model)
        # @todo
        # description & language
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting activity-status"
        if hasattr(el, 'activity-status'):
            activity_status_name = unicode(el['activity-status'])
            activity_status_code = el['activity-status'].get('code')
            activity_status = None

            if activity_status_code:
                activity_status, created = ActivityStatusType.objects.get_or_create(
                                               code=activity_status_code
                                           )
            else:
                if activity_status_name:
                    activity_status, created = ActivityStatusType.objects.get_or_create(
                                                   name=str(activity_status_name).capitalize()
                                               )

            iati_activity.activity_status = activity_status
            iati_activity.save() # todo

        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting activity-dates"
        if hasattr(el, 'activity-date'):
            for activity_date in el['activity-date']:
                if activity_date.get('iso-date'):
                    if activity_date.get('type') == 'start-planned':
                        iati_activity.start_planned = self._parse_date(activity_date.get('iso-date'))
                    elif activity_date.get('type') == 'start-actual':
                        iati_activity.start_actual = self._parse_date(activity_date.get('iso-date'))
                    elif activity_date.get('type') == 'end-planned':
                        iati_activity.end_planned = self._parse_date(activity_date.get('iso-date'))
                    elif activity_date.get('type') == 'end-actual':
                        iati_activity.end_actual = self._parse_date(activity_date.get('iso-date'))
                    iati_activity.save()

        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting activity-contacts"
        iati_activity.iatiactivitycontact_set.all().delete()
        if hasattr(el, 'contact-info'):
            iati_activity_contact = IATIActivityContact.objects.create(
                                        iati_activity=iati_activity
                                    )
            if hasattr(el['contact-info'], 'organisation'):
                iati_activity_contact.organisation = unicode(el['contact-info']['organisation']).encode('UTF-8')
            if hasattr(el['contact-info'], 'telephone'):
                iati_activity_contact.telephone = unicode(el['contact-info']['telephone']).encode('UTF-8')
            if hasattr(el['contact-info'], 'email'):
                iati_activity_contact.email = unicode(el['contact-info']['email']).encode('UTF-8')
            if hasattr(el['contact-info'], 'mailing-address'):
                iati_activity_contact.mailing_address = unicode(el['contact-info']['mailing-address']).encode('UTF-8')
            iati_activity_contact.save()

        # ====================================================================
        # PARTICIPATING ORGANISATIONS
        # ====================================================================

        # get_or_create >
        # ParticipatingOrganisation(models.Model)
        # @todo
        # org_name_lang
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting participating-orgs"
        iati_activity.participatingorganisation_set.all().delete()
        if hasattr(el, 'participating-org'):
            for participating_org in el['participating-org']:
                self._save_participating_org(participating_org, iati_activity)

        # ====================================================================
        # GEOPOLITICAL INFORMATION
        # ====================================================================

        # get_or_create >
        # IATIActivityCountry(models.Model)
        # @todo
        # lang
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting recipient-country"
        iati_activity.iatiactivitycountry_set.all().delete()
        if hasattr(el, 'recipient-country'):
            for recipient_country in el['recipient-country']:
                self._save_recipient_country(recipient_country, iati_activity)

        # get_or_create >
        # IATIActivityRegion(models.Model)
        # @todo
        # lang
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting recipient-region"

        iati_activity.iatiactivityregion_set.all().delete()
        if hasattr(el, 'recipient-region'):
            for recipient_region in el['recipient-region']:
                self._save_recipient_region(recipient_region, iati_activity)

        # ====================================================================
        # CLASSIFICATIONS
        # ====================================================================

        # get_or_create >
        # IATIActivitySector(models.Model)
        # @todo
        # percentage
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting activity-sectors"
        iati_activity.sectors.all().delete()
        if hasattr(el, 'sector'):
            for sector in el.sector:
                self._save_sector(sector, iati_activity)

        # get_or_create >
        # IATIActivityPolicyMarker(models.Model)
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting policy-markers"
        iati_activity.iatiactivitypolicymarker_set.all().delete()
        if hasattr(el, 'policy-marker'):
            for policy_marker in el['policy-marker']:
                if policy_marker.get('significance'):
                    try:
                        if int(policy_marker.get('significance')) in range(1, 4):
                            self._save_policy_marker(policy_marker, iati_activity)
                    except ValueError:
                        pass

        # get_or_create >
        # CollaborationType(models.Model)
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting collaboration-type"
        if hasattr(el, 'collaboration-type'):
            collaboration_type_code = el['collaboration-type'].get('code')
            if collaboration_type_code:
                iati_activity.collaboration_type = CollaborationType.objects.get_or_create(
                                                       code=collaboration_type_code
                                                   )[0]

        # get_or_create >
        # FlowType(models.Model)
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting default-flow-type"
        if hasattr(el, 'default-flow-type'):
            # todo catch typo
            try:
                iati_activity.default_flow_type = FlowType.objects.get_or_create(
                                                      code=int(el['default-flow-type'].get('code'))
                                                  )[0]
                iati_activity.save()
            except ValueError:
                default_flow_type = str(el['default-flow-type']).replace(' ', '_').replace('-', '_').upper()
                try:
                    iati_activity.default_flow_type = FlowType.objects.get_or_create(
                                                          code=int(default_flow_type)
                                                      )[0]
                    iati_activity.save()
                except ValueError:
                    match = None
                    for k, v in FLOW_TYPE_CHOICES_MAP:
                        if k == default_flow_type:
                            match = v
                    if match:
                        iati_activity.default_flow_type = FlowType.objects.get_or_create(
                                                              code=match
                                                          )[0]
                        iati_activity.save()
                    else:
                        pass
#                        e = "ValueError: Unsupported vocabulary_type '"+str(iati_activity_sector_vocabulary_type)+"' in VOCABULARY_CHOICES_MAP"
#                        raise Exception(e)

        # get_or_create >
        # FinanceType(models.Model)
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting default-finance-type"
        if hasattr(el, 'default-finance-type'):
            try:
                iati_activity.default_finance_type = FinanceType.objects.get_or_create(
                                                         code=int(el['default-finance-type'].get('code'))
                                                     )[0]
                iati_activity.save()
            except ValueError:
                pass

        # get_or_create >
        # AidType(models.Model)
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting default-aid-type"
        if hasattr(el, 'default-aid-type'):
            aid_type_code = el['default-aid-type'].get('code')
            if aid_type_code:
                iati_activity.default_aid_type = AidType.objects.get_or_create(
                    code=aid_type_code
                )[0]

        # get_or_create >
        # TiedAidStatus(models.Model)
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting default-tied-status"
        if hasattr(el, 'default-tied-status'):
            tied_aid_status = el['default-tied-status'].get('code')
            try:
                if int(tied_aid_status) in range(3, 6):
                    iati_activity.default_tied_status_type = TiedAidStatusType.objects.get_or_create(
                                                         code=int(tied_aid_status)
                                                     )[0]
                    iati_activity.save()
            except ValueError:
                pass

        # ====================================================================
        # FINANCIAL
        # ====================================================================

        # get_or_create >
        # IATIActivityBudget(models.Model)
        # @todo
        # type, currency, lang
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting activity-budgets"
        iati_activity.iatiactivitybudget_set.all().delete()
        if hasattr(el, 'budget'):
            total_budget = 0
            iati_statistics, created = ActivityStatistics.objects.get_or_create(
                                                                      iati_identifier=iati_activity
                                                                  )
            iati_statistics.total_budget = Decimal(total_budget)
            for budget in el.budget:
                if hasattr(budget, 'value'):
                    value = str(getattr(budget, 'value')).replace(',', '.')
                    total_budget += int(float(value))  # value may contain a decimal point after the comma replacement
                self._save_budget(budget, iati_activity)
            iati_statistics.total_budget = Decimal(str(total_budget))
            iati_statistics.save()


        # ====================================================================
        # TRANSACTION
        # ====================================================================

        # get_or_create >
        # Transaction(models.Model)
        # @todo
        # type, currency, lang
        # --------------------------------------------------------------------

        if PARSER_DEBUG:
            print "setting transactions"
        iati_activity.iatitransaction_set.all().delete()
        if hasattr(el, 'transaction'):
            for transaction in el.transaction:
                self._save_transaction(transaction, iati_activity, organisation)
        if PARSER_DEBUG:
            print "=========================="
            print "All tests passed...!"
            print "=========================="

        # ====================================================================
        # RELATED DOCUMENTS
        # ====================================================================

        iati_activity.iatiactivitydocument_set.all().delete()
        if hasattr(el, 'document-link'):
            for document in el['document-link']:
                if document.get('url'):
                    iati_document = IATIActivityDocument.objects.create(
                        iati_activity=iati_activity,
                        url=document.get('url')
                    )
                    if document.get('format'):
                        iati_document.format = document.get('format')
                        iati_document.save()
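
One detail worth noting in the budget block above: the final assignment uses Decimal(str(total_budget)) rather than Decimal(total_budget). For an int the two are equivalent, but going through str() is the safe habit whenever the value might be a float, because Decimal(float) preserves the float's full binary expansion:

from decimal import Decimal

via_str = Decimal(str(0.145))   # Decimal('0.145')
via_float = Decimal(0.145)      # the exact binary value, dozens of digits long

assert via_str == Decimal("0.145")
assert via_float != Decimal("0.145")   # 0.145 has no exact binary form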

Example 147

Project: orisi Source File: main.py
def timelock(args):
  if len(args) < 2:
    print "USAGE: `%s timelock <locktime_minutes> <return_address>`" % START_COMMAND
    return

  return_address = args[1]

  if CHARTER_URL != None:
    print "fetching charter: %s" % CHARTER_URL
  else:
    print "using built-in charter"

  charter = fetch_charter()

  oracle_pubkeys = []
  oracle_fees = {}
  oracle_bms = []
  oracle_fastcasts = []

  sum_fees_satoshi = Decimal(charter['org_fee']) * 100000000

  for o in charter['nodes']:
    oracle_pubkeys.append(o['pubkey'])
    oracle_fees[o['address']] = o['fee']
    oracle_fastcasts.append(o['fastcast'])
    sum_fees_satoshi += Decimal(o['fee']) * 100000000
  
  min_sigs = int(ceil(float(len(oracle_pubkeys))/2))

  print "number of nodes: %i" % len(charter['nodes'])
  print "required signatures: %i" % min_sigs

  oracle_fees[charter['org_address']] = charter['org_fee']

  key_list = oracle_pubkeys

  request = {}
  msig_addr = return_address

  request['message_id'] = "%s-%s" % (msig_addr, str(randrange(1000000000,9000000000)))
  request['pubkey_list'] = key_list

  request['miners_fee_satoshi'] = charter['miners_fee_satoshi']
  request['locktime'] = time.time() + int(args[0])*60
  request['return_address'] = return_address
  request['oracle_fees'] = oracle_fees
  request['req_sigs'] = min_sigs
  request['operation'] = 'safe_timelock_create'

  pub, priv = generateKey()

  meta_request = {}
  meta_request['source'] = pub
  meta_request['channel'] = 0
  meta_request['epoch'] = time.mktime(datetime.datetime.utcnow().timetuple())
  meta_request['body'] = json.dumps(request)

  sendMessage(constructMessage(priv, **meta_request))

  print ""
  print "request sent. awaiting oracle replies..."
  print "need at least %r of %r oracles to reply to the request to proceed" % (min_sigs, len(charter['nodes']))
  print "if the request takes over 30 seconds to process, it means that some of the oracles might be offline - contact [email protected] ."
  print ""

  suffix = None
  msig_addr = None
  contract_id = None
  confirmation_count = 0

  while suffix is None:
    msgs = getMessages()
    for m in msgs['results']:
      try:
        body = json.loads(m['body'])
      except:
        logging.exception('fastcast: wrong body for frame_id %r ; ignoring' % m)
        continue

      if not 'in_reply_to' in body:
        continue

      if body['in_reply_to'] == request['message_id']:
        if m['source'] in oracle_fastcasts:
          oracle_fastcasts.remove(m['source'])

          if 'operation' in body:
            if body['operation'] == 'safe_timelock_error':
              print "Operation error! One of the oracles reports:"
              print body['comment']
              return

          print "received confirmation from %r" % m['source']

          if suffix == None:
            suffix = body['mark']
            msig_addr = body['addr']
            contract_id = body['contract_id']
          else:
            if (msig_addr != body['addr']) or (suffix != body['mark']) or (contract_id != body['contract_id']):
              logging.error('Oracles didn\'t agree on the timelock address or the marker. Is one of them running a different code?')
              logging.error('Please investigate.')
              return

            confirmation_count += 1
            print "Oracle confirmations: %r of %r required" % (confirmation_count, min_sigs)
      
  print ""
  print "You can now send bitcoin to this address: %s and it will be locked for %r minutes from now." % (msig_addr, int(args[0]))
  print "IMPORTANT:   the amount you send needs to end with %r satoshi." % suffix
  print "             e.g. if you want to lock in BTC 0.00030000, you have to send 0.0003%r" % suffix
  print "  qr code:   http://www.btcfrog.com/qr/bitcoinPNG.php?address=%s&amount=0.0003%r&label=timelock" % (msig_addr, suffix)
  print " monitoring: https://blockchain.info/address/%s" % msig_addr
  print ""
  print "FEES: oracle & org fees: %i satoshi (as detailed in the charter)" % sum_fees_satoshi
  print "      miners fee: %i satoshi (yes, it's high - we want to encourage more pools to accept msig)" % charter['miners_fee_satoshi']
  print ""
  print "awaiting further oracle communication regarding this contract...."
  print "(contract_id: %s)" % contract_id
  print ""


  read_messages = []
  while True:
    msgs = getMessages()
    for m in msgs['results']:
      if m['frame_id'] in read_messages:
        continue
      read_messages.append(m['frame_id'])

      try:
        body = json.loads(m['body'])
      except:
        logging.exception('fastcast: wrong body for frame_id %r ; ignoring' % m)
        continue

      if not "contract_id" in body:
        continue

      if body['contract_id'] != contract_id:
        continue

      print body
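
The fee arithmetic above converts BTC strings from the charter to satoshi by multiplying Decimals by 100000000, which keeps the totals exact. The same conversion through floats can land a fraction of a satoshi short and then truncate; a quick comparison with an illustrative fee:

from decimal import Decimal

fee_btc = "0.29"

exact = Decimal(fee_btc) * 100000000   # Decimal('29000000.00'), exact
lossy = float(fee_btc) * 100000000     # e.g. 28999999.999999996 on IEEE doubles

assert int(exact) == 29000000
assert lossy != 29000000               # the float version misses by a hair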

Example 148

Project: labuildings Source File: convert.py
def convert(buildingsFile, osmOut):
    with open(buildingsFile) as f:
        features = json.load(f)
    allAddresses = {}
    buildings = []
    buildingShapes = []
    buildingIdx = index.Index()

    # Returns the coordinates for this address
    def keyFromAddress(address):
        return str(address['geometry']['coordinates'][0]) + "," + str(address['geometry']['coordinates'][1])

    for feature in features:
        if feature['geometry']['type'] == 'Polygon' or feature['geometry']['type'] == 'MultiPolygon':
            extra_tags = osm_tags.get_osm_tags(feature)
            feature['properties']['osm'] = extra_tags
            buildings.append(feature)
            shape = asShape(feature['geometry'])
            buildingShapes.append(shape)
            buildingIdx.add(len(buildingShapes) - 1, shape.bounds)

        # These are the addresses that don't overlap any buildings
        elif feature['geometry']['type'] == 'Point':
            # The key is the coordinates of this address. Track how many addresses share these coords.
            key = keyFromAddress(feature)
            if key in allAddresses:
                allAddresses[key].append(feature)
            else:
                allAddresses[key] = [feature]

        else:
            print "geometry of unknown type:", feature['geometry']['type']

    # Generates a new osm id.
    osmIds = dict(node = -1, way = -1, rel = -1)
    def newOsmId(type):
        osmIds[type] = osmIds[type] - 1
        return osmIds[type]

    ## Formats multi part house numbers
    def formatHousenumber(p):
        def suffix(part1, part2, hyphen_type=None):
            #part1 = stripZeroes(part1)
            if not part2:
                return str(part1)
            #part2 = stripZeroes(part2)
            return str(part1) + ' ' + str(part2)
        #def stripZeroes(addr): # strip leading zeroes from numbers
        #    if addr.isdigit():
        #        addr = str(int(addr))
        #    if '-' in addr:
        #        try:
        #            addr2 = addr.split('-')
        #            if len(addr2) == 2:
        #                addr = str(int(addr2[0])) + '-' + str(int(addr2[1])).zfill(2)
        #        except:
        #            pass
        #    return addr
        number = suffix(p['Number'], p['NumSuffix'])
        if p['NumPrefix']:
            number = p['NumPrefix'] + number
        return number

    # Converts an address
    def convertAddress(address):
        result = dict()
        if all (k in address for k in ('Number', 'StreetName')):
            if address['Number']:
                result['addr:housenumber'] = formatHousenumber(address)
            if address['StreetName']:

                # Titlecase
                streetname = address['StreetName'].title()
                if address['StArticle']:
                    streetname = address['StArticle'].title() + " " + streetname
                if address['PreType']:
                    streetname = address['PreType'].title() + " " + streetname
                if address['PreDir']:
                    streetname = address['PreDir'].title() + " " + streetname
                if address['PreMod']:
                    streetname = address['PreMod'].title() + " " + streetname
                if address['PostType']:
                    streetname = streetname + " " + address['PostType'].title()
                if address['PostDir']:
                    streetname = streetname + " " + address['PostDir'].title()
                if address['PostMod']:
                    streetname = streetname + " " + address['PostMod'].title()

                # Fix titlecase on 1St, 2Nd, 3Rd, 4Th, etc
                streetname = re.sub(r"(.*)(\d+)St\s*(.*)", r"\1\2st \3", streetname)
                streetname = re.sub(r"(.*)(\d+)Nd\s*(.*)", r"\1\2nd \3", streetname)
                streetname = re.sub(r"(.*)(\d+)Rd\s*(.*)", r"\1\2rd \3", streetname)
                streetname = re.sub(r"(.*)(\d+)Th\s*(.*)", r"\1\2th \3", streetname)

                # Expand 'St ' -> 'Saint'
                # relevant for:
                #   'St Clair'
                #   'St Louis'
                #   'St James'
                #   'St James Park'
                #   'St Andrews'
                #   'St Nicolas'
                #   'St Cloud'
                #   'St Ambrose'
                #   'St Bonaventure'
                #   'St Joseph'
                #   'St Tropez'
                if streetname[0:3] == 'St ': streetname = 'Saint ' + streetname[3:]
                # Middle name expansions
                streetname = streetname.replace(' St ', ' Street ')
                streetname = streetname.replace(' Rd ', ' Road ')
                streetname = streetname.replace(' Blvd ', ' Boulevard ')
                result['addr:street'] = streetname
            if address['PCITY1']:
                result['addr:city'] = address['PCITY1'].title()
            elif address['LegalComm']:
                result['addr:city'] = address['LegalComm'].title()
            if address['ZipCode']:
                result['addr:postcode'] = str(int(address['ZipCode']))
            if address['UnitName']:
                result['addr:unit'] = address['UnitName']
        return result

    # Distills coincident addresses into one address where possible.
    # Takes an array of addresses and returns an array of 1 or more addresses
    def distillAddresses(addresses):
        # Only distill addresses if the following conditions are true:
        # 1) the addresses share the same coordinates.
        # AND
        # 2a) all the attributes are the same _except_ the unit number/name
        # OR
        # 2b) the street number is the same but the street names are referring to the same thing

        outputAddresses = []

        # First, group the addresses into separate lists for each unique location
        addressesByCoords = {}
        for address in addresses:
            key = keyFromAddress(address)
            if key in addressesByCoords:
                addressesByCoords[key].append(address)
            else:
                addressesByCoords[key] = [address]

        # loop over unique coordinates
        for key in addressesByCoords:
            # Here see if we can collapse any of these addresses at the same coords.

            # addressesByCoords[key] is an array of addresses at this location.

            # We are only looking for the 2 possibilities above (2a) and (2b).
            # If the situation is more complicated, change nothing.
            outputAddresses.extend(distillAddressesAtPoint(addressesByCoords[key]))

        return outputAddresses

    # This function is called by distillAddresses.
    # It assumes all addresses are at the same coordinates.
    # Returns an array of 1 or more addresses
    def distillAddressesAtPoint(addresses):

        if len(addresses) == 1:
            return addresses

        firstAddress = addresses[0]

        # (2a) If the first address is an apartment, see if all the rest are too.

        # NOTE: sometimes an apartment building has a few address points that lack a UnitName...
        # ...so checking for the presence of UnitName in firstAddress wouldn't always work.
        props = firstAddress['properties']
        if debug: print "Testing to see if these are apartments...", '\t'.join([str(props['Number']), str(props['NumSuffix']), str(props['PreType']), str(props['StreetName']), str(props['PostType']), str(props['UnitName'])])
        # Compare subsequent addresses in the array to the first address.
        # Hence, range starts at 1.
        for i in range(1, len(addresses)):
            if not areSameAddressExceptUnit(firstAddress, addresses[i]):
                props = addresses[i]['properties']
                if debug: print "No, this address was different...........", '\t'.join([str(props['Number']), str(props['NumSuffix']), str(props['PreType']), str(props['StreetName']), str(props['PostType']), str(props['UnitName'])])
                #print firstAddress
                #print addresses[i]
                break
            # else, keep going

        else: # else for the `for` statement. Executes only if `break` never did.
            # We checked them all, and they're all the same except UnitName.
            # In this case the apartment data is useless to OSM because the
            # apartment nodes are all on top of each other.
            # So, discard the unit information and return just one address.
            firstAddress['properties']['UnitName'] = None
            if debug: print "Yes they were apartments! Collapsed", len(addresses), "into one"
            return [firstAddress]

        # (2b) Check if the street number is all the same.
        # For this, we use a list of alternative names (like HWY 1, etc)...
        # ...and we need to know which canonical name to keep.
        if debug: print "Testing to see if the street names are synonyms.."
        canonicalStreetName = None
        for i in range(1, len(addresses)):
            props = addresses[i]['properties']
            if not areSameAddressExceptStreet(firstAddress, addresses[i]):
                if debug: print "No, this address was different...........", '\t'.join([str(props['Number']), str(props['NumSuffix']), str(props['PreType']), str(props['StreetName']), str(props['PostType']), str(props['UnitName'])])
                #print firstAddress
                #print addresses[i]
                break
            compoundStreetName = (str(props['PreType']),str(props['StreetName']),str(props['PostType']))
            currentCanonicalStreetName = getCanonicalName(compoundStreetName)
            if currentCanonicalStreetName:
                if debug: print "found canonical name", currentCanonicalStreetName
                if ((currentCanonicalStreetName == canonicalStreetName) or (canonicalStreetName == None)):
                    canonicalStreetName = currentCanonicalStreetName
                else:
                    if debug: print "canonicalStreetNames didn't match:", canonicalStreetName, currentCanonicalStreetName
                    break
            else:
                print "couldn't find canonicalStreetName for", compoundStreetName
                break

        else: # else for the `for` statement. Executes only if `break` never did.
            # We checked them all, and they're all the same except StreetName.
            # If we can determine that they are all the same synonym, we can
            # overwrite the other streetname information and return just one address.
            firstAddress['properties']['PreType'] = canonicalStreetName[0]
            firstAddress['properties']['StreetName'] = canonicalStreetName[1]
            firstAddress['properties']['PostType'] = canonicalStreetName[2]
            if debug: print "Yes they were synonyms! Collapsed", len(addresses), "into one"
            return [firstAddress]

        # This is only executed if neither of the two `else` statements executed
        # for the two `for` statements above. That means we were unable to collapse
        # separate apartments into one, or collapse synonymous street names into one.
        # So, instead of returning just one address, we fail and return all of them.
        return addresses

    def areSameAddressExceptUnit(a1, a2):
        for key in ['NumPrefix', 'Number', 'NumSuffix', 'PreMod', 'PreDir', 'PreType', 'StArticle', 'StreetName', 'PostType', 'PostDir', 'PostMod', 'ZipCode', 'LegalComm', 'PCITY1']:
            if a1['properties'][key] != a2['properties'][key]:
                #print key, a1['properties'][key], "!=", a2['properties'][key]
                return False
        return True

    def areSameAddressExceptStreet(a1, a2):
        for key in ['NumPrefix', 'Number', 'NumSuffix', 'PreMod', 'PreDir', 'StArticle', 'UnitName', 'PostDir', 'PostMod', 'ZipCode', 'LegalComm', 'PCITY1']:
            if a1['properties'][key] != a2['properties'][key]:
                #print key, a1['properties'][key], "!=", a2['properties'][key]
                return False
        return True

    # Sometimes we have identical addresses that differ only by street name.
    # Usually these are because the street name is also a highway. We want to 
    # remove all the highway names and only use the street name for the address
    canonicalNames = {
        ("None", "LINCOLN", "BOULEVARD"): (None, "LINCOLN", "BOULEVARD"),
        ("ROUTE", "1", "None"): (None, "LINCOLN", "BOULEVARD"),
        ("HIGHWAY", "1", "None"): (None, "LINCOLN", "BOULEVARD"),
        ("None", "SR-1", "None"): (None, "LINCOLN", "BOULEVARD"),
        ("None", "PCH", "None"): (None, "LINCOLN", "BOULEVARD"),
    }

    def getCanonicalName(compoundStreetName):
        result = None
        try:
            result = canonicalNames[compoundStreetName]
        except KeyError:
            return None
        return result

    # Appends new node or returns existing if exists.
    nodes = {}
    def appendNewNode(coords, osmXml):
        rlon = int(float(coords[0]*10**7))
        rlat = int(float(coords[1]*10**7))
        if (rlon, rlat) in nodes:
            return nodes[(rlon, rlat)]
        node = etree.Element('node', visible = 'true', id = str(newOsmId('node')))
        node.set('lon', str(Decimal(coords[0])*Decimal(1)))
        node.set('lat', str(Decimal(coords[1])*Decimal(1)))
        nodes[(rlon, rlat)] = node
        osmXml.append(node)
        return node
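
    # A note on the Decimal usage above: Decimal(coords[0]) converts the
    # float exactly, carrying its full binary expansion (often 50+ digits),
    # while the multiplication by Decimal(1) rounds the result to the
    # context precision (28 significant digits by default), which trims
    # the serialized lon/lat strings to a manageable length.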

    # Sometimes we want to force overlapping nodes, such as with addresses.
    # This way they'll show up in JOSM and the contributor can deal with them manually.
    # Otherwise, we might try to apply multiple address tags to the same node...
    # ...which is also incorrect, but harder to detect.
    def appendNewNodeIgnoringExisting(coords, osmXml):
        rlon = int(float(coords[0]*10**7))
        rlat = int(float(coords[1]*10**7))
        #if (rlon, rlat) in nodes:
        #    return nodes[(rlon, rlat)]
        node = etree.Element('node', visible = 'true', id = str(newOsmId('node')))
        node.set('lon', str(Decimal(coords[0])*Decimal(1)))
        node.set('lat', str(Decimal(coords[1])*Decimal(1)))
        nodes[(rlon, rlat)] = node
        osmXml.append(node)
        return node

    def appendNewWay(coords, intersects, osmXml):
        way = etree.Element('way', visible='true', id=str(newOsmId('way')))
        firstNid = 0
        for i, coord in enumerate(coords):
            if i == 0: continue # the first and last coordinate are the same
            node = appendNewNode(coord, osmXml)
            if i == 1: firstNid = node.get('id')
            way.append(etree.Element('nd', ref=node.get('id')))

            # Check each way segment for intersecting nodes
            int_nodes = {}
            try:
                line = LineString([coord, coords[i+1]])
            except IndexError:
                line = LineString([coord, coords[1]])
            for idx, c in enumerate(intersects):
                if line.buffer(0.000001).contains(Point(c[0], c[1])) and c not in coords:
                    t_node = appendNewNode(c, osmXml)
                    for n in way.iter('nd'):
                        if n.get('ref') == t_node.get('id'):
                            break
                    else:
                        int_nodes[t_node.get('id')] = Point(c).distance(Point(coord))
            for n in sorted(int_nodes, key=lambda key: int_nodes[key]): # add intersecting nodes in order
                way.append(etree.Element('nd', ref=n))
            
        way.append(etree.Element('nd', ref=firstNid)) # close way
        osmXml.append(way)
        return way

    # Appends an address to a given node or way.
    def appendAddress(address, element):
    #    # Need to check if these tags already exist on this element
        for k, v in convertAddress(address['properties']).iteritems():
            # TODO: is this doing anything useful?
            #for child in element:
            #    if child.tag == 'tag':
            #        #print k, v
            #        if child.attrib.get('k') == k:
            #            print "found key", k
            #            if child.attrib.get('v') == v:
            #                print "found matching value", v
           element.append(etree.Element('tag', k=k, v=v))

    # Appends a building to a given OSM XML document.
    def appendBuilding(building, shape, address, osmXml):
        # Check for intersecting buildings
        intersects = []
        for i in buildingIdx.intersection(shape.bounds):
            try:
                for c in buildingShapes[i].exterior.coords:
                    if Point(c[0], c[1]).buffer(0.000001).intersects(shape):
                        intersects.append(c)
            except AttributeError:
                for c in buildingShapes[i][0].exterior.coords:
                    if Point(c[0], c[1]).buffer(0.000001).intersects(shape):
                        intersects.append(c)

        # Export building, create multipolygon if there are interior shapes.
        interiors = []
        try:
            way = appendNewWay(list(shape.exterior.coords), intersects, osmXml)
            for interior in shape.interiors:
                interiors.append(appendNewWay(list(interior.coords), [], osmXml))
        except AttributeError:
            way = appendNewWay(list(shape[0].exterior.coords), intersects, osmXml)
            for interior in shape[0].interiors:
                interiors.append(appendNewWay(list(interior.coords), [], osmXml))
        if len(interiors) > 0:
            relation = etree.Element('relation', visible='true', id=str(newOsmId('way')))
            relation.append(etree.Element('member', type='way', role='outer', ref=way.get('id')))
            for interior in interiors:
                relation.append(etree.Element('member', type='way', role='inner', ref=interior.get('id')))
            relation.append(etree.Element('tag', k='type', v='multipolygon'))
            osmXml.append(relation)
            way = relation
        for tag in building['properties']['osm']:
            value = building['properties']['osm'][tag]
            way.append(etree.Element('tag', k=tag, v=value))
        # if 'GeneralUse' in building['properties']:
        #     way.append(etree.Element('tag', k='building', v=building['properties']['GeneralUse']))
        # else:
        #     way.append(etree.Element('tag', k='building', v='yes'))
        # if 'SpecificUs' in building['properties']:
        #     way.append(etree.Element('tag', k='building:use', v=building['properties']['GeneralUse']))
        if 'YearBuilt' in building['properties'] and building['properties']['YearBuilt'] is not None:
            YearBuilt = int(building['properties']['YearBuilt'])
            if YearBuilt > 0:
                way.append(etree.Element('tag', k='start_date', v=str(YearBuilt)))
        # if 'Specific_1' in building['properties']:
        #         way.append(etree.Element('tag', k='amenity', v=building['properties']['Specific_1']))
        if 'Units' in building['properties'] and building['properties']['Units'] is not None:
            units = int(round(float(building['properties']['Units']), 0))
            if units > 0:
                way.append(etree.Element('tag', k='building:units', v=str(units)))
        if 'HEIGHT' in building['properties']:
            height = round(((building['properties']['HEIGHT'] * 12) * 0.0254), 1)
            if height > 0:
                way.append(etree.Element('tag', k='height', v=str(height)))
        if 'ELEV' in building['properties']:
            elevation = round(((building['properties']['ELEV'] * 12) * 0.0254), 1)
            if elevation > 0:
                way.append(etree.Element('tag', k='ele', v=str(elevation)))
        if 'BLD_ID' in building['properties']:
            way.append(etree.Element('tag', k='lacounty:bld_id', v=str(building['properties']['BLD_ID'])))
        if 'AIN' in building['properties'] and building['properties']['AIN'] is not None:
            way.append(etree.Element('tag', k='lacounty:ain', v=str(building['properties']['AIN'])))
#        if address:
#            appendAddress(address, way)

    # Export buildings & addresses. Only export an address with its building
    # if there is exactly one address per building. Export remaining addresses
    # as individual nodes. The remaining addresses are collected in a dictionary
    # keyed by their coordinates, so addresses that share coordinates are caught.
    allAddresses = {}
    osmXml = etree.Element('osm', version='0.6', generator='[email protected]')
    for i in range(0, len(buildings)):

        buildingAddresses = []
        for address in buildings[i]['properties']['addresses']:
            buildingAddresses.append(address)
        address = None
        if len(buildingAddresses) == 1:
            # There's only one address in the building footprint
            address = buildingAddresses[0]
        elif len(buildingAddresses) > 1:
            # If there are multiple addresses, first try to distill them.
            # If we can distill them to one address, we can still add it to this building.
            distilledAddresses = distillAddresses(buildingAddresses)
            if len(distilledAddresses) == 1:
                # We distilled down to one address. Add it to the building.
                address = distilledAddresses[0]
            else:
                # We could not distill down to one address. Export them as nodes instead.
                for address in distilledAddresses:
                    # The key is the coordinates of this address. Track how many addresses share these coords.
                    key = keyFromAddress(address)
                    if key in allAddresses:
                        allAddresses[key].append(address)
                    else:
                        allAddresses[key] = [address]

        appendBuilding(buildings[i], buildingShapes[i], address, osmXml)


    # Export any addresses that aren't the only address for a building.
    if (len(allAddresses) > 0):

        # Iterate over the list of distinct coordinates found in the address data
        for coordskey in allAddresses:
            # if a distinct coordinate has only one associated address,
            # then export that address as a new node
            if len(allAddresses[coordskey]) == 1:
                address = allAddresses[coordskey][0]
                coordinates = address['geometry']['coordinates']
#                node = appendNewNode(coordinates, osmXml) # returns old node if one exists at these coords
#                appendAddress(address, node)

            # If there is more than one address at these coordinates, do something.
            # ...but do what exactly?
            else:
                distilledAddresses = distillAddresses(allAddresses[coordskey])
                if len(distilledAddresses) == 1:
                    # We distilled down to one address. Append it.
                    address = distilledAddresses[0]
                    coordinates = address['geometry']['coordinates']
#                    node = appendNewNode(coordinates, osmXml) # returns old node if one exists at these coords
#                    appendAddress(address, node)
                else:
                    if debug: print "found duplicate coordinates that could not be distilled:", coordskey, "has", len(allAddresses[coordskey]), "addresses"
                    if debug: print '\t'.join(["num", "numsufx", "pretype", "street", "posttype", "unit"])
                    for address in distilledAddresses:
                        # TODO: do something smart here. These are overlapping addresses that we couldn't distill.
                        # TODO: maybe jitter them, or leave stacked but with FIXME?
                        # TODO: For now, we use appendNewNodeIgnoringExisting to pile the nodes on top of each other.
                        #print address
                        props = address['properties']
                        if debug: print '\t'.join([str(props['Number']), str(props['NumSuffix']), str(props['PreType']), str(props['StreetName']), str(props['PostType']), str(props['UnitName'])])
                        coordinates = address['geometry']['coordinates']
#                        node = appendNewNodeIgnoringExisting(coordinates, osmXml) # Force overlapping nodes so JOSM will catch them
#                        appendAddress(address, node)

    with open(osmOut, 'w') as outFile:
        outFile.writelines(tostring(osmXml, pretty_print=True, xml_declaration=True, encoding='UTF-8'))
        print 'Exported ' + osmOut
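
Both converters route coordinates through Decimal before writing them as XML attributes. A minimal standalone sketch (not part of either project) of what Decimal(value) * Decimal(1) buys over plain str(float): the conversion captures the float's exact binary value, and the multiplication rounds it to the decimal context's precision, so coordinates are never silently truncated:

from decimal import Decimal, getcontext

lon = -118.2436849
print(Decimal(lon))               # exact binary expansion, dozens of digits
print(Decimal(lon) * Decimal(1))  # rounded to context precision (28 digits)
getcontext().prec = 12
print(Decimal(lon) * Decimal(1))  # a tighter context shortens the output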

Example 149

Project: nycbuildings Source File: convert.py
def convert(buildingsFile, osmOut):
    with open(buildingsFile) as f:
        buildings = json.load(f)
    buildingShapes = []
    buildingIdx = index.Index()
    for building in buildings:
        shape = asShape(building['geometry'])
        buildingShapes.append(shape)
        buildingIdx.add(len(buildingShapes) - 1, shape.bounds)

    # Generates a new osm id.
    osmIds = dict(node = -1, way = -1, rel = -1)
    def newOsmId(type):
        osmIds[type] = osmIds[type] - 1
        return osmIds[type]

    # Formats multi-part house numbers.
    def formatHousenumber(p):
        def suffix(part1, part2, hyphen_type=None):
            part1 = stripZeroes(part1)
            if not part2:
                return str(part1)
            part2 = stripZeroes(part2)
            if hyphen_type == 'U': # unit numbers
                return part1 + '-' + part2
            if len(part2) == 1 and part2.isalpha(): # single letter extensions
                return part1 + part2
            return part1 + ' ' + part2 # All others
        def stripZeroes(addr): # strip leading zeroes from numbers
            if addr.isdigit():
                addr = str(int(addr))
            if '-' in addr:
                try:
                    addr2 = addr.split('-')
                    if len(addr2) == 2:
                        addr = str(int(addr2[0])) + '-' + str(int(addr2[1])).zfill(2)
                except ValueError:
                    pass
            return addr
        number = suffix(p['HOUSE_NUMB'], p['HOUSE_NU_1'], p['HYPHEN_TYP'])
        return number

    # Converts an address
    def convertAddress(address):
        result = dict()
        if all (k in address for k in ('HOUSE_NUMB', 'STREET_NAM')):
            if address['HOUSE_NUMB']:
                result['addr:housenumber'] = formatHousenumber(address)
            if address['STREET_NAM']:
                streetname = address['STREET_NAM'].title()
                streetname = streetname.replace('F D R ', 'FDR ')
                # Expand Service Road
                # See https://github.com/osmlab/nycbuildings/issues/30
                streetname = re.sub(r"(.*)\bSr\b(.*)", r"\1Service Road\2", streetname)
                # Expand cardinal directions on Service Roads
                streetname = re.sub(r"(.*\bService Road\s)\bN\b(.*)", r"\1North\2", streetname)
                streetname = re.sub(r"(.*\bService Road\s)\bE\b(.*)", r"\1East\2", streetname)
                streetname = re.sub(r"(.*\bService Road\s)\bS\b(.*)", r"\1South\2", streetname)
                streetname = re.sub(r"(.*\bService Road\s)\bW\b(.*)", r"\1West\2", streetname)
                # Expand Expressway on Service Roads
                streetname = re.sub(r"(.*)Expwy\s\bN\b(.*)", r"\1Expressway North\2", streetname)
                streetname = re.sub(r"(.*)Expwy\s\bE\b(.*)", r"\1Expressway East\2", streetname)
                streetname = re.sub(r"(.*)Expwy\s\bS\b(.*)", r"\1Expressway South\2", streetname)
                streetname = re.sub(r"(.*)Expwy\s\bW\b(.*)", r"\1Expressway West\2", streetname)
                streetname = re.sub(r"(.*)Expwy(.*)", r"\1Expressway\2", streetname)
                # Add ordinal suffixes to numerals
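                # Order matters here: the 11/12/13 rules must run before the
                # generic 1/2/3 rules so e.g. '111' becomes '111th', not '111st';
                # once a suffix is appended the digits are no longer followed by
                # whitespace, so the later substitutions leave them alone.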
                streetname = re.sub(r"(.*)(\d*11)\s+(.*)", r"\1\2th \3", streetname)
                streetname = re.sub(r"(.*)(\d*12)\s+(.*)", r"\1\2th \3", streetname)
                streetname = re.sub(r"(.*)(\d*13)\s+(.*)", r"\1\2th \3", streetname)
                streetname = re.sub(r"(.*)(\d*1)\s+(.*)", r"\1\2st \3", streetname)
                streetname = re.sub(r"(.*)(\d*2)\s+(.*)", r"\1\2nd \3", streetname)
                streetname = re.sub(r"(.*)(\d*3)\s+(.*)", r"\1\2rd \3", streetname)
                streetname = re.sub(r"(.*)(\d+)\s+(.*)", r"\1\2th \3", streetname)
                # Expand 'Ft' -> 'Fort'
                if streetname[0:3] == 'Ft ': streetname = 'Fort ' + streetname[3:]
                # Expand 'St ' -> 'Saint '
                if streetname[0:3] == 'St ': streetname = 'Saint ' + streetname[3:]
                # Expand 'Rev ' -> 'Reverend '
                if streetname[0:4] == 'Rev ': streetname = 'Reverend ' + streetname[4:]
                # Expand 'St John' anywhere in the name -> 'Saint John'
                streetname = streetname.replace('St John', 'Saint John')
                # Middle name expansions
                streetname = streetname.replace(' St ', ' Street ')
                streetname = streetname.replace(' Rd ', ' Road ')
                streetname = streetname.replace(' Blvd ', ' Boulevard ')
                result['addr:street'] = streetname
            if address['ZIPCODE']:
                result['addr:postcode'] = str(int(address['ZIPCODE']))
        return result

    # Appends a new node, or returns the existing one if a node already exists at these coordinates.
    nodes = {}
    def appendNewNode(coords, osmXml):
        rlon = int(float(coords[0]*10**7))
        rlat = int(float(coords[1]*10**7))
        if (rlon, rlat) in nodes:
            return nodes[(rlon, rlat)]
        node = etree.Element('node', visible = 'true', id = str(newOsmId('node')))
        node.set('lon', str(Decimal(coords[0])*Decimal(1)))
        node.set('lat', str(Decimal(coords[1])*Decimal(1)))
        nodes[(rlon, rlat)] = node
        osmXml.append(node)
        return node

    def appendNewWay(coords, intersects, osmXml):
        way = etree.Element('way', visible='true', id=str(newOsmId('way')))
        firstNid = 0
        for i, coord in enumerate(coords):
            if i == 0: continue # the first and last coordinate are the same
            node = appendNewNode(coord, osmXml)
            if i == 1: firstNid = node.get('id')
            way.append(etree.Element('nd', ref=node.get('id')))
            
            # Check each way segment for intersecting nodes
            int_nodes = {}
            try:
                line = LineString([coord, coords[i+1]])
            except IndexError:
                line = LineString([coord, coords[1]])
            for idx, c in enumerate(intersects):
                if line.buffer(0.000001).contains(Point(c[0], c[1])) and c not in coords:
                    t_node = appendNewNode(c, osmXml)
                    for n in way.iter('nd'):
                        if n.get('ref') == t_node.get('id'):
                            break
                    else:
                        int_nodes[t_node.get('id')] = Point(c).distance(Point(coord))
            for n in sorted(int_nodes, key=lambda key: int_nodes[key]): # add intersecting nodes in order
                way.append(etree.Element('nd', ref=n))
            
        way.append(etree.Element('nd', ref=firstNid)) # close way
        osmXml.append(way)
        return way

    # Appends an address to a given node or way.
    def appendAddress(address, element):
        for k, v in convertAddress(address['properties']).iteritems():
            element.append(etree.Element('tag', k=k, v=v))

    # Appends a building to a given OSM xml document.
    def appendBuilding(building, shape, address, osmXml):
        # Check for intersecting buildings
        intersects = []
        for i in buildingIdx.intersection(shape.bounds):
            try:
                for c in buildingShapes[i].exterior.coords:
                    if Point(c[0], c[1]).buffer(0.000001).intersects(shape):
                        intersects.append(c)
            except AttributeError:
                for c in buildingShapes[i][0].exterior.coords:
                    if Point(c[0], c[1]).buffer(0.000001).intersects(shape):
                        intersects.append(c)

        # Export building, create multipolygon if there are interior shapes.
        interiors = []
        try:
            way = appendNewWay(list(shape.exterior.coords), intersects, osmXml)
            for interior in shape.interiors:
                interiors.append(appendNewWay(list(interior.coords), [], osmXml))
        except AttributeError:
            way = appendNewWay(list(shape[0].exterior.coords), intersects, osmXml)
            for interior in shape[0].interiors:
                interiors.append(appendNewWay(list(interior.coords), [], osmXml))
        if len(interiors) > 0:
            relation = etree.Element('relation', visible='true', id=str(newOsmId('way')))
            relation.append(etree.Element('member', type='way', role='outer', ref=way.get('id')))
            for interior in interiors:
                relation.append(etree.Element('member', type='way', role='inner', ref=interior.get('id')))
            relation.append(etree.Element('tag', k='type', v='multipolygon'))
            osmXml.append(relation)
            way = relation
        way.append(etree.Element('tag', k='building', v='yes'))
        if 'HEIGHT_ROO' in building['properties']:
            height = round(((building['properties']['HEIGHT_ROO'] * 12) * 0.0254), 1)
            if height > 0:
                way.append(etree.Element('tag', k='height', v=str(height)))
        if 'BIN' in building['properties']:
            way.append(etree.Element('tag', k='nycdoitt:bin', v=str(building['properties']['BIN'])))
        if address: appendAddress(address, way)

    # Export buildings & addresses. Only export an address with its building
    # if there is exactly one address per building. Export remaining addresses
    # as individual nodes.
    allAddresses = []
    osmXml = etree.Element('osm', version='0.6', generator='[email protected]')
    for i in range(0, len(buildings)):

        # Filter out special addresses categories A and B
        buildingAddresses = []
        for address in buildings[i]['properties']['addresses']:
            if address['properties']['SPECIAL_CO'] not in ['A', 'B']:
                buildingAddresses.append(address)
        address = None
        if len(buildingAddresses) == 1:
            address = buildingAddresses[0]
        else:
            allAddresses.extend(buildingAddresses)

        # Skip zero-height buildings whose footprints are negligibly small.
        if int(buildings[i]['properties']['HEIGHT_ROO']) == 0:
            if buildingShapes[i].area > 1e-09:
                appendBuilding(buildings[i], buildingShapes[i], address, osmXml)
        else:
            appendBuilding(buildings[i], buildingShapes[i], address, osmXml)

    # Export any addresses that aren't the only address for a building.
    if (len(allAddresses) > 0):
        for address in allAddresses:
            node = appendNewNode(address['geometry']['coordinates'], osmXml)
            appendAddress(address, node)

    with open(osmOut, 'w') as outFile:
        outFile.writelines(tostring(osmXml, pretty_print=True, xml_declaration=True, encoding='UTF-8'))
        print 'Exported ' + osmOut
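
If the goal were instead to clamp coordinates to OSM's native resolution of seven decimal places, Decimal.quantize would be the natural tool. A hypothetical helper, sketched here for illustration and not something either converter does:

from decimal import Decimal

def osm_coord(value):
    # Round a coordinate to exactly 7 decimal places, going through repr()
    # so the Decimal starts from the float's shortest decimal form.
    return str(Decimal(repr(value)).quantize(Decimal('0.0000001')))

print(osm_coord(-73.985134618))  # -73.9851346
print(osm_coord(40.7589))        # 40.7589000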