sqlalchemy.func.sum

Here are examples of the Python API `sqlalchemy.func.sum`, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.

32 Examples

Example 1

Project: backfeed-protocol
Source File: base.py
View license
    def sum_equally_voted_reputation(self, contribution, value):
        """Return the summed reputation of all users who evaluated the
        given contribution with exactly the given value."""
        query = DBSession.query(func.sum(User.reputation))
        query = query.join(Evaluation)
        query = query.filter(Evaluation.contribution_id == contribution.id)
        query = query.filter(Evaluation.value == value)
        # SUM() is NULL when nothing matched; normalize that to 0.
        total = query.one()[0]
        return total or 0

Example 2

Project: dokomoforms
Source File: column_properties.py
View license
def answer_sum(survey_node: AnswerableSurveyNode):
    """Get the sum of the answers.

    Only numeric answer types ('integer' and 'decimal') can be summed.
    """
    summable_types = {'integer', 'decimal'}
    return _answer_stat(survey_node, summable_types, sa.func.sum)

Example 3

Project: tvb-framework
Source File: datatype_dao.py
View license
    def get_disk_size_for_operation(self, operation_id):
        """
        Return the disk size for the operation by summing over the disk space of the resulting DataTypes.

        :param operation_id: key of the Operation whose resulting DataTypes are summed
        :returns: summed disk size; 0 when no results exist or on DB error
        """
        try:
            disk_size = self.session.query(func.sum(model.DataType.disk_size)
                                           ).filter(model.DataType.fk_from_operation == operation_id).scalar() or 0
        # FIX: `except SQLAlchemyError, excep` is Python-2-only syntax; the
        # `as` form is valid on Python 2.6+ and required on Python 3.
        except SQLAlchemyError as excep:
            self.logger.exception(excep)
            disk_size = 0
        return disk_size

Example 4

Project: tvb-framework
Source File: datatype_dao.py
View license
    def get_summary_for_group(self, datatype_group_id):
        """
        Summarize the DataTypes in one group.

        :param datatype_group_id: key of the DataTypeGroup
        :return (disk_size SUM, subject MAX); (0, "") on error or no rows
        """
        result = 0, ""
        try:
            result = self.session.query(func.sum(model.DataType.disk_size), func.max(model.DataType.subject)
                                        ).filter(model.DataType.fk_datatype_group == datatype_group_id).all()[0] or result
        # FIX: Python-2-only `except X, e` replaced with `except X as e`.
        except SQLAlchemyError as excep:
            self.logger.exception(excep)

        return result

Example 5

Project: tvb-framework
Source File: datatype_dao.py
View license
    def get_datatype_group_disk_size(self, dt_group_id):
        """
        Return the size of all the DataTypes from this datatype group.

        :param dt_group_id: key of the DataTypeGroup
        :returns: summed disk size; 0 for an empty group or on DB error
        """
        try:
            hdd_size = self.session.query(func.sum(model.DataType.disk_size)
                                          ).filter(model.DataType.fk_datatype_group == dt_group_id).scalar() or 0
        # FIX: Python-2-only `except X, e` replaced with `except X as e`.
        except SQLAlchemyError as ex:
            self.logger.exception(ex)
            hdd_size = 0
        return hdd_size

Example 6

Project: tvb-framework
Source File: operation_dao.py
View license
    def compute_disk_size_for_started_ops(self, user_id):
        """Get all the disk space that should be reserved for the started operations of this user.

        :param user_id: key of the launching user
        :returns: summed estimated disk size; 0 when none are started or on DB error
        """
        try:
            expected_hdd_size = self.session.query(func.sum(model.Operation.estimated_disk_size)
                                                   ).filter(model.Operation.fk_launched_by == user_id
                                                   ).filter(model.Operation.status == model.STATUS_STARTED).scalar()
        # FIX: Python-2-only `except X, e` replaced with `except X as e`.
        except SQLAlchemyError as excep:
            self.logger.exception(excep)
            expected_hdd_size = 0
        # SUM() over no rows is NULL/None; normalize to 0.
        return expected_hdd_size or 0

Example 7

Project: tvb-framework
Source File: project_dao.py
View license
    def compute_user_generated_disk_size(self, user_id):
        """
        Do a SUM on DATA_TYPES table column DISK_SIZE, for the current user.
        :returns 0 when no DT are found, or SUM from DB; -1 on DB error.
        """
        try:
            total_size = self.session.query(func.sum(model.DataType.disk_size)).join(model.Operation
                                        ).filter(model.Operation.fk_launched_by == user_id).scalar()
            return total_size or 0
        # FIX: Python-2-only `except X, e` replaced with `except X as e`.
        except SQLAlchemyError as excep:
            self.logger.exception(excep)
            return -1

Example 8

Project: tvb-framework
Source File: project_dao.py
View license
    def get_project_disk_size(self, project_id):
        """
        Do a SUM on DATA_TYPES table column DISK_SIZE, for the current project.
        :returns 0 when no DT are found, or SUM from DB; -1 on DB error.
        """
        try:
            total_size = self.session.query(func.sum(model.DataType.disk_size)).join(model.Operation
                                        ).filter(model.Operation.fk_launched_in == project_id).scalar()
            return total_size or 0
        # FIX: Python-2-only `except X, e` replaced with `except X as e`.
        except SQLAlchemyError as excep:
            self.logger.exception(excep)
            return -1

Example 9

Project: sqlalchemy
Source File: test_deprecations.py
View license
    def test_apply_sum(self):
        """Query.apply_sum(col)

        avg = session.query(Address).apply_avg(Address.bounces)

        """

        Address = self.classes.Address
        session = create_session()

        # Deprecated .values() path: SUM computed per query invocation.
        rows = list(session.query(Address).values(func.sum(Address.bounces)))
        total = rows[0][0]
        assert total == 11

        # Direct aggregate query must agree.
        total = session.query(func.sum(Address.bounces)).one()[0]
        assert total == 11

Example 10

Project: sqlalchemy
Source File: test_generative.py
View license
    @testing.fails_if(
        lambda: testing.against('mysql+mysqldb') and
        testing.db.dialect.dbapi.version_info[:4] == (1, 2, 1, 'gamma'),
        "unknown incompatibility")
    def test_aggregate_1(self):
        """SUM(foo.bar) over rows with bar < 30 must be 435."""
        foo_table = self.tables.foo
        aggregate = create_session().query(func.sum(foo_table.c.bar))
        assert aggregate.filter(foo_table.c.bar < 30).one() == (435,)

Example 11

Project: odo
Source File: test_sql.py
View license
def test_decimal_conversion():
    """Summing a SQL decimal column through odo must yield a Decimal
    equal to the Python-side sum."""
    data = [(1.0,), (2.0,)]
    with tmpfile('.db') as fn:
        table = odo(data, 'sqlite:///%s::x' % fn, dshape='var * {x: decimal[11, 2]}')
        result = odo(sa.select([sa.func.sum(table.c.x)]), Decimal)
    expected = sum(Decimal(row[0]) for row in data)
    assert result == expected

Example 12

Project: portingdb
Source File: queries.py
View license
def order_by_weight(db, query):
    """Order ``query`` by combined status + priority weight, heaviest
    first, breaking ties by case-insensitive package name."""
    weight_col = (func.sum(tables.Status.weight) +
                  func.sum(tables.Priority.weight)).label('weight')
    subquery = (
        db.query(tables.CollectionPackage, weight_col)
        .join(tables.CollectionPackage.status_obj)
        .join(tables.CollectionPackage.priority_obj)
        .group_by(tables.CollectionPackage.package_name)
        .subquery()
    )
    # Outer join so packages without a weight row are still returned.
    return (
        query
        .outerjoin(subquery, subquery.c.package_name == tables.Package.name)
        .order_by(-subquery.c.weight)
        .order_by(func.lower(tables.Package.name))
    )

Example 13

View license
def checkNumberOfErrorsByJobId(jobId, errorType='fatal'):
    """Get the number of errors for a specified job and severity."""
    sess = GlobalDB.db().session
    total = (
        sess.query(func.sum(ErrorMetadata.occurrences))
        .join(ErrorMetadata.severity)
        .filter(ErrorMetadata.job_id == jobId, RuleSeverity.name == errorType)
        .scalar()
    )
    # error_metadata tallies total errors by job/file/field/error type.
    # Jobs without any errors or warnings have no rows at all, so the
    # aggregate comes back empty; treat that as zero matching errors.
    return total or 0

Example 14

View license
def get_submission_stats(submission_id):
    """Get summarized dollar amounts by submission."""
    sess = GlobalDB.db().session
    # NOTE: `!= None` is intentional -- SQLAlchemy overloads it into
    # an IS NOT NULL clause; `is not None` would not work here.
    obligations = sess.query(func.sum(AwardFinancial.transaction_obligated_amou)).\
        filter(AwardFinancial.submission_id == submission_id)
    procurement_only = obligations.filter(AwardFinancial.piid != None)
    assistance_only = obligations.filter(
        or_(AwardFinancial.fain != None, AwardFinancial.uri != None))
    return {
        "total_obligations": float(obligations.scalar() or 0),
        "total_procurement_obligations": float(procurement_only.scalar() or 0),
        "total_assistance_obligations": float(assistance_only.scalar() or 0)
    }

Example 15

Project: indico
Source File: statistics.py
View license
def calculate_rooms_booked_time(rooms, start_date=None, end_date=None):
    """Return the total booked time, in seconds, for *rooms* in a window.

    Defaults to the 30-day window ending yesterday. Only occurrences on
    working days are counted, clamped to each location's working hours.
    """
    # Default window: the 30 days ending yesterday.
    if end_date is None:
        end_date = date.today() - relativedelta(days=1)
    if start_date is None:
        start_date = end_date - relativedelta(days=29)
    # Reservations on working days
    # (dow between 1 and 5; presumably Mon-Fri with Sunday=0 -- confirm
    # against the DB backend's extract('dow') convention)
    reservations = Reservation.find(Reservation.room_id.in_(r.id for r in rooms),
                                    extract('dow', ReservationOccurrence.start_dt).between(1, 5),
                                    ReservationOccurrence.start_dt >= start_date,
                                    ReservationOccurrence.end_dt <= end_date,
                                    ReservationOccurrence.is_valid,
                                    _join=ReservationOccurrence)
    # Take into account only working hours: clamp each occurrence's start/end
    # to the location's working_time_start/working_time_end before summing.
    earliest_time = greatest(cast(ReservationOccurrence.start_dt, TIME), Location.working_time_start)
    latest_time = least(cast(ReservationOccurrence.end_dt, TIME), Location.working_time_end)
    booked_time = reservations.with_entities(func.sum(latest_time - earliest_time)).scalar()
    # SUM() over zero rows is NULL; fall back to an empty timedelta.
    return (booked_time or timedelta()).total_seconds()

Example 16

Project: Pyrit
Source File: storage.py
View license
        def getStats(self):
            """Return (total password count, {essid: password count})."""
            with SessionContext(self.SessionClass) as session:
                total_query = session.query(sql.func.sum(PAW2_DBObject.numElems))
                total = total_query.one()[0]
                # SUM() is NULL when the table is empty.
                pwtotal = int(total) if total is not None else 0
                per_essid = session.query(ESSID_DBObject.essid,
                                          sql.func.sum(PAW2_DBObject.numElems))
                per_essid = per_essid.outerjoin(PYR2_DBObject)
                per_essid = per_essid.outerjoin(PAW2_DBObject)
                per_essid = per_essid.group_by(ESSID_DBObject.essid)
                # Outer joins keep ESSIDs with no passwords (count is NULL).
                essid_results = dict(
                    (str(essid), int(pwcount) if pwcount is not None else 0)
                    for essid, pwcount in per_essid)
                return (pwtotal, essid_results)

Example 17

Project: Pyrit
Source File: storage.py
View license
        def getStats(self):
            """Return (total password count, {essid: password count})."""
            with SessionContext(self.SessionClass) as session:
                total_query = session.query(sql.func.sum(PAW2_DBObject.numElems))
                total = total_query.one()[0]
                # SUM() is NULL when the table is empty.
                pwtotal = int(total) if total is not None else 0
                per_essid = session.query(ESSID_DBObject.essid,
                                          sql.func.sum(PAW2_DBObject.numElems))
                per_essid = per_essid.outerjoin(PYR2_DBObject)
                per_essid = per_essid.outerjoin(PAW2_DBObject)
                per_essid = per_essid.group_by(ESSID_DBObject.essid)
                # Outer joins keep ESSIDs with no passwords (count is NULL).
                essid_results = dict(
                    (str(essid), int(pwcount) if pwcount is not None else 0)
                    for essid, pwcount in per_essid)
                return (pwtotal, essid_results)

Example 18

Project: backfeed-protocol
Source File: base.py
View license
    def total_reputation(self):
        """Sum the reputation of every user attached to this contract."""
        row = DBSession.query(func.sum(User.reputation)).filter(
            User.contract == self).one()
        return row[0]

Example 19

View license
    def get_candidates(self, plugin, context, r_hd_binding_db):
        """Selection criteria: Hosting devices that...
               ... are based on the template required by router's type
               AND
               ... are administratively up
               AND
               ... are active (i.e., has status HD_ACTIVE)
               AND
               ... are bound to tenant owning router OR is unbound
               AND
               ... are enough slots available to host the router

            Among hosting devices meeting these criteria the device with
            less allocated slots is preferred.
        """
        # SELECT hosting_device_id, created_at, sum(num_allocated)
        # FROM hostingdevices AS hd
        # LEFT OUTER JOIN slotallocations AS sa ON hd.id=sa.hosting_device_id
        # WHERE
        #    hd.template_id='11111111-2222-3333-4444-555555555555' AND
        #    hd.admin_state_up=TRUE AND
        #    hd.status='ACTIVE' AND
        # <<<sharing case:>>>
        #    (hd.tenant_bound IS NULL OR hd.tenant_bound='t10')
        # <<<non-sharing case:>>>
        #    (sa.tenant_bound='t10' OR
        #     (sa.tenant_bound IS NULL AND sa.logical_resource_owner='t10') OR
        #     hd.tenant_bound='t10' OR
        #     (hd.tenant_bound IS NULL AND sa.hosting_device_id IS NULL))
        # GROUP BY hosting_device_id
        # HAVING sum(num_allocated) <= 8 OR sum(num_allocated) IS NULL
        # ORDER BY created_at;
        router = r_hd_binding_db.router
        tenant_id = router['tenant_id']
        router_type_db = r_hd_binding_db.router_type
        template_id = router_type_db['template_id']
        template_db = router_type_db.template
        # Max slots a device may already have allocated and still fit router.
        slot_threshold = template_db.slot_capacity - router_type_db.slot_need

        query = context.session.query(
            hd_models.HostingDevice.id, hd_models.HostingDevice.created_at,
            func.sum(hd_models.SlotAllocation.num_allocated))
        query = query.outerjoin(
            hd_models.SlotAllocation,
            hd_models.HostingDevice.id ==
            hd_models.SlotAllocation.hosting_device_id)
        query = query.filter(
            hd_models.HostingDevice.template_id == template_id,
            hd_models.HostingDevice.admin_state_up == expr.true(),
            hd_models.HostingDevice.status == cisco_constants.HD_ACTIVE)
        if r_hd_binding_db.share_hosting_device:
            query = query.filter(
                expr.or_(hd_models.HostingDevice.tenant_bound == expr.null(),
                         hd_models.HostingDevice.tenant_bound == tenant_id))
        else:
            query = query.filter(
                expr.or_(
                    hd_models.SlotAllocation.tenant_bound == tenant_id,
                    expr.and_(
                        hd_models.SlotAllocation.tenant_bound == expr.null(),
                        hd_models.SlotAllocation.logical_resource_owner ==
                        tenant_id),
                    hd_models.HostingDevice.tenant_bound == tenant_id,
                    expr.and_(
                        hd_models.HostingDevice.tenant_bound == expr.null(),
                        hd_models.SlotAllocation.hosting_device_id ==
                        expr.null())))
        query = query.group_by(hd_models.HostingDevice.id)
        # BUGFIX: the IS NULL test must apply to SUM(num_allocated) itself,
        # not be placed *inside* the sum -- sum(x == NULL) never matches
        # devices with no slot allocations at all, contradicting the HAVING
        # clause documented in the SQL comment above.
        query = query.having(
            expr.or_(
                func.sum(hd_models.SlotAllocation.num_allocated) <=
                slot_threshold,
                func.sum(hd_models.SlotAllocation.num_allocated) ==
                expr.null()))
        query = query.order_by(hd_models.HostingDevice.created_at)
        return query.all()

Example 20

View license
    def get_candidates(self, plugin, context, r_hd_binding_db):
        """Selection criteria: Hosting devices that...
               ... are based on the template required by router's type
               AND
               ... are administratively up
               AND
               ... are active (i.e., has status HD_ACTIVE)
               AND
               ... are bound to tenant owning router OR is unbound
               AND
               ... are enough slots available to host the router

            Among hosting devices meeting these criteria the device with
            less allocated slots is preferred.
        """
        # SELECT hosting_device_id, created_at, sum(num_allocated)
        # FROM hostingdevices AS hd
        # LEFT OUTER JOIN slotallocations AS sa ON hd.id=sa.hosting_device_id
        # WHERE
        #    hd.template_id='11111111-2222-3333-4444-555555555555' AND
        #    hd.admin_state_up=TRUE AND
        #    hd.status='ACTIVE' AND
        # <<<sharing case:>>>
        #    (hd.tenant_bound IS NULL OR hd.tenant_bound='t10')
        # <<<non-sharing case:>>>
        #    (sa.tenant_bound='t10' OR
        #     (sa.tenant_bound IS NULL AND sa.logical_resource_owner='t10') OR
        #     hd.tenant_bound='t10' OR
        #     (hd.tenant_bound IS NULL AND sa.hosting_device_id IS NULL))
        # GROUP BY hosting_device_id
        # HAVING sum(num_allocated) <= 8 OR sum(num_allocated) IS NULL
        # ORDER BY created_at;
        router = r_hd_binding_db.router
        tenant_id = router['tenant_id']
        router_type_db = r_hd_binding_db.router_type
        template_id = router_type_db['template_id']
        template_db = router_type_db.template
        # Max slots a device may already have allocated and still fit router.
        slot_threshold = template_db.slot_capacity - router_type_db.slot_need

        # LEFT OUTER JOIN keeps devices with no allocations (SUM is NULL).
        query = context.session.query(
            hd_models.HostingDevice.id, hd_models.HostingDevice.created_at,
            func.sum(hd_models.SlotAllocation.num_allocated))
        query = query.outerjoin(
            hd_models.SlotAllocation,
            hd_models.HostingDevice.id ==
            hd_models.SlotAllocation.hosting_device_id)
        query = query.filter(
            hd_models.HostingDevice.template_id == template_id,
            hd_models.HostingDevice.admin_state_up == expr.true(),
            hd_models.HostingDevice.status == cisco_constants.HD_ACTIVE)
        if r_hd_binding_db.share_hosting_device:
            # Sharing case: device unbound or bound to this tenant.
            query = query.filter(
                expr.or_(hd_models.HostingDevice.tenant_bound == expr.null(),
                         hd_models.HostingDevice.tenant_bound == tenant_id))
        else:
            # Non-sharing case: see the SQL comment above for the four arms.
            query = query.filter(
                expr.or_(
                    hd_models.SlotAllocation.tenant_bound == tenant_id,
                    expr.and_(
                        hd_models.SlotAllocation.tenant_bound == expr.null(),
                        hd_models.SlotAllocation.logical_resource_owner ==
                        tenant_id),
                    hd_models.HostingDevice.tenant_bound == tenant_id,
                    expr.and_(
                        hd_models.HostingDevice.tenant_bound == expr.null(),
                        hd_models.SlotAllocation.hosting_device_id ==
                        expr.null())))
        query = query.group_by(hd_models.HostingDevice.id)
        # Keep devices with spare capacity; the IS NULL arm keeps devices
        # that have no slot allocations at all (SUM over no rows is NULL).
        query = query.having(
            expr.or_(func.sum(
                hd_models.SlotAllocation.num_allocated) <= slot_threshold,
                func.sum(hd_models.SlotAllocation.num_allocated) ==
                         expr.null()))
        query = query.order_by(hd_models.HostingDevice.created_at)
        return query.all()

Example 21

View license
    def get_candidates(self, plugin, context, r_hd_binding_db):
        """Selection criteria: Hosting devices that...
               ... are based on the template required by router's type
               AND
               ... are administratively up
               AND
               ... are active (i.e., has status HD_ACTIVE)
               AND
               ... are bound to tenant owning router OR is unbound
               AND
               ... are enough slots available to host the router

            Among hosting devices meeting these criteria the device with
            less allocated slots is preferred.
        """
        # SELECT hosting_device_id, created_at, sum(num_allocated)
        # FROM hostingdevices AS hd
        # LEFT OUTER JOIN slotallocations AS sa ON hd.id=sa.hosting_device_id
        # WHERE
        #    hd.template_id='11111111-2222-3333-4444-555555555555' AND
        #    hd.admin_state_up=TRUE AND
        #    hd.status='ACTIVE' AND
        # <<<sharing case:>>>
        #    (hd.tenant_bound IS NULL OR hd.tenant_bound='t10')
        # <<<non-sharing case:>>>
        #    (sa.tenant_bound='t10' OR
        #     (sa.tenant_bound IS NULL AND sa.logical_resource_owner='t10') OR
        #     hd.tenant_bound='t10' OR
        #     (hd.tenant_bound IS NULL AND sa.hosting_device_id IS NULL))
        # GROUP BY hosting_device_id
        # HAVING sum(num_allocated) <= 8 OR sum(num_allocated) IS NULL
        # ORDER BY created_at;
        router = r_hd_binding_db.router
        tenant_id = router['tenant_id']
        router_type_db = r_hd_binding_db.router_type
        template_id = router_type_db['template_id']
        template_db = router_type_db.template
        # Max slots a device may already have allocated and still fit router.
        slot_threshold = template_db.slot_capacity - router_type_db.slot_need

        query = context.session.query(
            hd_models.HostingDevice.id, hd_models.HostingDevice.created_at,
            func.sum(hd_models.SlotAllocation.num_allocated))
        query = query.outerjoin(
            hd_models.SlotAllocation,
            hd_models.HostingDevice.id ==
            hd_models.SlotAllocation.hosting_device_id)
        query = query.filter(
            hd_models.HostingDevice.template_id == template_id,
            hd_models.HostingDevice.admin_state_up == expr.true(),
            hd_models.HostingDevice.status == cisco_constants.HD_ACTIVE)
        if r_hd_binding_db.share_hosting_device:
            query = query.filter(
                expr.or_(hd_models.HostingDevice.tenant_bound == expr.null(),
                         hd_models.HostingDevice.tenant_bound == tenant_id))
        else:
            query = query.filter(
                expr.or_(
                    hd_models.SlotAllocation.tenant_bound == tenant_id,
                    expr.and_(
                        hd_models.SlotAllocation.tenant_bound == expr.null(),
                        hd_models.SlotAllocation.logical_resource_owner ==
                        tenant_id),
                    hd_models.HostingDevice.tenant_bound == tenant_id,
                    expr.and_(
                        hd_models.HostingDevice.tenant_bound == expr.null(),
                        hd_models.SlotAllocation.hosting_device_id ==
                        expr.null())))
        query = query.group_by(hd_models.HostingDevice.id)
        # BUGFIX: the IS NULL test must apply to SUM(num_allocated) itself,
        # not be placed *inside* the sum -- sum(x == NULL) never matches
        # devices with no slot allocations at all, contradicting the HAVING
        # clause documented in the SQL comment above.
        query = query.having(
            expr.or_(
                func.sum(hd_models.SlotAllocation.num_allocated) <=
                slot_threshold,
                func.sum(hd_models.SlotAllocation.num_allocated) ==
                expr.null()))
        query = query.order_by(hd_models.HostingDevice.created_at)
        return query.all()

Example 22

View license
    def get_candidates(self, plugin, context, r_hd_binding_db):
        """Selection criteria: Hosting devices that...
               ... are based on the template required by router's type
               AND
               ... are administratively up
               AND
               ... are active (i.e., has status HD_ACTIVE)
               AND
               ... are bound to tenant owning router OR is unbound
               AND
               ... are enough slots available to host the router

            Among hosting devices meeting these criteria the device with
            less allocated slots is preferred.
        """
        # SELECT hosting_device_id, created_at, sum(num_allocated)
        # FROM hostingdevices AS hd
        # LEFT OUTER JOIN slotallocations AS sa ON hd.id=sa.hosting_device_id
        # WHERE
        #    hd.template_id='11111111-2222-3333-4444-555555555555' AND
        #    hd.admin_state_up=TRUE AND
        #    hd.status='ACTIVE' AND
        # <<<sharing case:>>>
        #    (hd.tenant_bound IS NULL OR hd.tenant_bound='t10')
        # <<<non-sharing case:>>>
        #    (sa.tenant_bound='t10' OR
        #     (sa.tenant_bound IS NULL AND sa.logical_resource_owner='t10') OR
        #     hd.tenant_bound='t10' OR
        #     (hd.tenant_bound IS NULL AND sa.hosting_device_id IS NULL))
        # GROUP BY hosting_device_id
        # HAVING sum(num_allocated) <= 8 OR sum(num_allocated) IS NULL
        # ORDER BY created_at;
        router = r_hd_binding_db.router
        tenant_id = router['tenant_id']
        router_type_db = r_hd_binding_db.router_type
        template_id = router_type_db['template_id']
        template_db = router_type_db.template
        # Max slots a device may already have allocated and still fit router.
        slot_threshold = template_db.slot_capacity - router_type_db.slot_need

        # LEFT OUTER JOIN keeps devices with no allocations (SUM is NULL).
        query = context.session.query(
            hd_models.HostingDevice.id, hd_models.HostingDevice.created_at,
            func.sum(hd_models.SlotAllocation.num_allocated))
        query = query.outerjoin(
            hd_models.SlotAllocation,
            hd_models.HostingDevice.id ==
            hd_models.SlotAllocation.hosting_device_id)
        query = query.filter(
            hd_models.HostingDevice.template_id == template_id,
            hd_models.HostingDevice.admin_state_up == expr.true(),
            hd_models.HostingDevice.status == cisco_constants.HD_ACTIVE)
        if r_hd_binding_db.share_hosting_device:
            # Sharing case: device unbound or bound to this tenant.
            query = query.filter(
                expr.or_(hd_models.HostingDevice.tenant_bound == expr.null(),
                         hd_models.HostingDevice.tenant_bound == tenant_id))
        else:
            # Non-sharing case: see the SQL comment above for the four arms.
            query = query.filter(
                expr.or_(
                    hd_models.SlotAllocation.tenant_bound == tenant_id,
                    expr.and_(
                        hd_models.SlotAllocation.tenant_bound == expr.null(),
                        hd_models.SlotAllocation.logical_resource_owner ==
                        tenant_id),
                    hd_models.HostingDevice.tenant_bound == tenant_id,
                    expr.and_(
                        hd_models.HostingDevice.tenant_bound == expr.null(),
                        hd_models.SlotAllocation.hosting_device_id ==
                        expr.null())))
        query = query.group_by(hd_models.HostingDevice.id)
        # Keep devices with spare capacity; the IS NULL arm keeps devices
        # that have no slot allocations at all (SUM over no rows is NULL).
        query = query.having(
            expr.or_(func.sum(
                hd_models.SlotAllocation.num_allocated) <= slot_threshold,
                func.sum(hd_models.SlotAllocation.num_allocated) ==
                         expr.null()))
        query = query.order_by(hd_models.HostingDevice.created_at)
        return query.all()

Example 23

Project: tvb-framework
Source File: datatype_dao.py
View license
    def compute_bursts_disk_size(self, burst_ids):
        """
        SUM the disk_size of all data types generated by each requested burst.
        Do not count DataType Groups as those already include the size of the entities inside the group.

        :param burst_ids: iterable of burst keys
        :returns a map from burst id to disk size (0 for bursts without persisted data)
        """
        # do not execute a query that will return []
        if not burst_ids:
            return {}
        # The query might return less results than burst_ids.
        # This happens if a burst entity has not been persisted yet.
        # For those bursts the size will be zero
        ret = {b_id: 0 for b_id in burst_ids}
        try:
            query = self.session.query(model.DataType.fk_parent_burst, func.sum(model.DataType.disk_size)
                        ).group_by(model.DataType.fk_parent_burst
                        ).filter(model.DataType.type != "DataTypeGroup"
                        ).filter(model.DataType.fk_parent_burst.in_(burst_ids))
            for b_id, size in query.all():
                ret[b_id] = size or 0
        # FIX: Python-2-only `except X, e` replaced with `except X as e`.
        except SQLAlchemyError as excep:
            self.logger.exception(excep)
        return ret

Example 24

Project: Camelot
Source File: test_properties.py
View license
    def test_column_property_eagerload_and_reuse(self):
        """Aggregate-based column properties must survive eager loading and
        be reusable across entities (Tag.score -> User.score -> Category.score)."""
        class Tag(self.Entity):
            score1 = Field(Float)
            score2 = Field(Float)

            user = ManyToOne('User')

            # Per-row computed column: product of the two scores.
            score = ColumnProperty(lambda c: c.score1 * c.score2)

        class User(self.Entity):
            name = Field(String(16))
            category = ManyToOne('Category')
            tags = OneToMany('Tag', lazy=False)
            # Correlated scalar subquery: SUM of this user's tag scores.
            score = ColumnProperty(lambda c:
                                   select([func.sum(Tag.score)],
                                          Tag.user_id == c.id).as_scalar())

        class Category(self.Entity):
            name = Field(String(16))
            users = OneToMany('User', lazy=False)

            # Reuses User.score: AVG over this category's users.
            score = ColumnProperty(lambda c:
                                   select([func.avg(User.score)],
                                          User.category_id == c.id
                                         ).as_scalar())

        self.create_all()

        with self.session.begin():
            u1 = User(name='joe', tags=[Tag(score1=5.0, score2=3.0),
                                             Tag(score1=55.0, score2=1.0)] )

            u2 = User(name='bar', tags=[Tag(score1=5.0, score2=4.0),
                                             Tag(score1=50.0, score2=1.0),
                                             Tag(score1=15.0, score2=2.0)])

            c1 = Category(name='dummy', users=[u1, u2] )

        # Drop all cached instances so the values below come from the DB.
        self.session.expunge_all()

        category = Category.query.one()
        assert len( category.users ) == 2
        for user in category.users:
            assert len( user.tags ) > 0
            # SQL-side SUM must equal the Python-side sum of tag scores.
            assert user.score == sum([tag.score for tag in user.tags])
            for tag in user.tags:
                assert tag.score == tag.score1 * tag.score2
        # u1 score = 15 + 55 = 70; u2 score = 20 + 50 + 30 = 100; AVG = 85.
        assert category.score == 85

Example 25

Project: Camelot
Source File: test_properties.py
View license
    def test_column_property_eagerload_and_reuse(self):
        """Aggregate-based column properties must survive eager loading and
        be reusable across entities (Tag.score -> User.score -> Category.score)."""
        class Tag(self.Entity):
            score1 = Field(Float)
            score2 = Field(Float)

            user = ManyToOne('User')

            # Per-row computed column: product of the two scores.
            score = ColumnProperty(lambda c: c.score1 * c.score2)

        class User(self.Entity):
            name = Field(String(16))
            category = ManyToOne('Category')
            tags = OneToMany('Tag', lazy=False)
            # Correlated scalar subquery: SUM of this user's tag scores.
            score = ColumnProperty(lambda c:
                                   select([func.sum(Tag.score)],
                                          Tag.user_id == c.id).as_scalar())

        class Category(self.Entity):
            name = Field(String(16))
            users = OneToMany('User', lazy=False)

            # Reuses User.score: AVG over this category's users.
            score = ColumnProperty(lambda c:
                                   select([func.avg(User.score)],
                                          User.category_id == c.id
                                         ).as_scalar())

        self.create_all()

        with self.session.begin():
            u1 = User(name='joe', tags=[Tag(score1=5.0, score2=3.0),
                                             Tag(score1=55.0, score2=1.0)] )

            u2 = User(name='bar', tags=[Tag(score1=5.0, score2=4.0),
                                             Tag(score1=50.0, score2=1.0),
                                             Tag(score1=15.0, score2=2.0)])

            c1 = Category(name='dummy', users=[u1, u2] )

        # Drop all cached instances so the values below come from the DB.
        self.session.expunge_all()

        category = Category.query.one()
        assert len( category.users ) == 2
        for user in category.users:
            assert len( user.tags ) > 0
            # SQL-side SUM must equal the Python-side sum of tag scores.
            assert user.score == sum([tag.score for tag in user.tags])
            for tag in user.tags:
                assert tag.score == tag.score1 * tag.score2
        # u1 score = 15 + 55 = 70; u2 score = 20 + 50 + 30 = 100; AVG = 85.
        assert category.score == 85

Example 26

View license
    @provide_session
    def _get_dep_statuses(self, ti, session, dep_context):
        """Yield dependency statuses for ``ti`` based on upstream task states.

        Counts SUCCESS/SKIPPED/FAILED/UPSTREAM_FAILED upstream task
        instances in one aggregate query, then delegates the verdict to
        ``_evaluate_trigger_rule``.
        """
        TI = airflow.models.TaskInstance
        TR = airflow.models.TriggerRule

        # Checking that all upstream dependencies have succeeded
        if not ti.task.upstream_list:
            yield self._passing_status(
                reason="The task instance did not have any upstream tasks.")
            # BUGFIX: `raise StopIteration` inside a generator becomes a
            # RuntimeError on Python 3.7+ (PEP 479); use `return` to end
            # the generator early.
            return

        if ti.task.trigger_rule == TR.DUMMY:
            yield self._passing_status(reason="The task had a dummy trigger rule set.")
            return

        # TODO(unknown): this query becomes quite expensive with dags that have many
        # tasks. It should be refactored to let the task report to the dag run and get the
        # aggregates from there.
        qry = (
            session
            .query(
                # One SUM(CASE ...) per terminal state, NULL-safe via COALESCE.
                func.coalesce(func.sum(
                    case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.FAILED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
                func.count(TI.task_id),
            )
            .filter(
                TI.dag_id == ti.dag_id,
                TI.task_id.in_(ti.task.upstream_task_ids),
                TI.execution_date == ti.execution_date,
                TI.state.in_([
                    State.SUCCESS, State.FAILED,
                    State.UPSTREAM_FAILED, State.SKIPPED]),
            )
        )

        successes, skipped, failed, upstream_failed, done = qry.first()
        for dep_status in self._evaluate_trigger_rule(
                ti=ti,
                successes=successes,
                skipped=skipped,
                failed=failed,
                upstream_failed=upstream_failed,
                done=done,
                flag_upstream_failed=dep_context.flag_upstream_failed,
                session=session):
            yield dep_status

Example 27

View license
    @provide_session
    def _get_dep_statuses(self, ti, session, dep_context):
        """Yield dependency statuses for ``ti`` based on upstream task states.

        Counts SUCCESS/SKIPPED/FAILED/UPSTREAM_FAILED upstream task
        instances in one aggregate query, then delegates the verdict to
        ``_evaluate_trigger_rule``.
        """
        TI = airflow.models.TaskInstance
        TR = airflow.models.TriggerRule

        # Checking that all upstream dependencies have succeeded
        if not ti.task.upstream_list:
            yield self._passing_status(
                reason="The task instance did not have any upstream tasks.")
            # BUGFIX: `raise StopIteration` inside a generator becomes a
            # RuntimeError on Python 3.7+ (PEP 479); use `return` to end
            # the generator early.
            return

        if ti.task.trigger_rule == TR.DUMMY:
            yield self._passing_status(reason="The task had a dummy trigger rule set.")
            return

        # TODO(unknown): this query becomes quite expensive with dags that have many
        # tasks. It should be refactored to let the task report to the dag run and get the
        # aggregates from there.
        qry = (
            session
            .query(
                # One SUM(CASE ...) per terminal state, NULL-safe via COALESCE.
                func.coalesce(func.sum(
                    case([(TI.state == State.SUCCESS, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.SKIPPED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.FAILED, 1)], else_=0)), 0),
                func.coalesce(func.sum(
                    case([(TI.state == State.UPSTREAM_FAILED, 1)], else_=0)), 0),
                func.count(TI.task_id),
            )
            .filter(
                TI.dag_id == ti.dag_id,
                TI.task_id.in_(ti.task.upstream_task_ids),
                TI.execution_date == ti.execution_date,
                TI.state.in_([
                    State.SUCCESS, State.FAILED,
                    State.UPSTREAM_FAILED, State.SKIPPED]),
            )
        )

        successes, skipped, failed, upstream_failed, done = qry.first()
        for dep_status in self._evaluate_trigger_rule(
                ti=ti,
                successes=successes,
                skipped=skipped,
                failed=failed,
                upstream_failed=upstream_failed,
                done=done,
                flag_upstream_failed=dep_context.flag_upstream_failed,
                session=session):
            yield dep_status

Example 28

Project: lrrbot
Source File: game_data.py
View license
def merge_games(conn, metadata, old_id, new_id, result_id):
	"""
	Merge games `old_id` and `new_id` into `result_id`, repointing quotes,
	summing per-show stats, and combining votes and per-show display data
	onto the surviving game row; the merged-away rows are then deleted.

	NOT THREADSAFE. Use `lock_tables` before calling `merge_games`. `old_id`, `new_id` and
	`result_id` must already exist in the database.
	"""

	# Nothing to do when both source games already are the result game.
	if old_id == result_id and new_id == result_id:
		return

	# Repoint quotes at the surviving game.
	quotes = metadata.tables["quotes"]
	conn.execute(quotes.update().where(quotes.c.game_id.in_({old_id, new_id})), {
		"game_id": result_id,
	})

	# Combine stats: sum both games' counts per (show, stat), upsert onto the
	# result game, then delete the stale source rows.
	game_stats = metadata.tables["game_stats"]
	query = insert(game_stats) \
		.from_select(["game_id", "show_id", "stat_id", "count"],
			sqlalchemy.select([result_id, game_stats.c.show_id, game_stats.c.stat_id, sqlalchemy.func.sum(game_stats.c.count)])
				.where((game_stats.c.game_id == old_id) | (game_stats.c.game_id == new_id))
				.group_by(game_stats.c.show_id, game_stats.c.stat_id)
		)
	query = query.on_conflict_do_update(
		index_elements=[game_stats.c.game_id, game_stats.c.show_id, game_stats.c.stat_id],
		set_={
			'count': query.excluded.count,
		}
	)
	conn.execute(query, {
		"result_id": result_id,
		"old_id": old_id,
		"new_id": new_id,
	})
	conn.execute(game_stats.delete().where(game_stats.c.game_id.in_({old_id, new_id} - {result_id})))

	# Move votes. Fast path: plain UPDATE. If a user voted on both games the
	# UPDATE violates the unique key, so fall back to merging in Python,
	# where `new_id`'s votes win over `old_id`'s.
	game_votes = metadata.tables["game_votes"]
	try:
		with conn.begin_nested():
			conn.execute(game_votes.update().where(game_votes.c.game_id.in_({old_id, new_id})), {
				"game_id": result_id,
			})
	except sqlalchemy.exc.IntegrityError:
		with conn.begin_nested():
			res = conn.execute(sqlalchemy.select([
				game_votes.c.show_id, game_votes.c.user_id, game_votes.c.vote
			]).where(game_votes.c.game_id == old_id))

			votes = {
				(show_id, user_id): vote
				for show_id, user_id, vote in res
			}

			res = conn.execute(sqlalchemy.select([
				game_votes.c.show_id, game_votes.c.user_id, game_votes.c.vote
			]).where(game_votes.c.game_id == new_id))

			# Votes on `new_id` override votes on `old_id`.
			votes.update({
				(show_id, user_id): vote
				for show_id, user_id, vote in res
			})

			conn.execute(game_votes.delete().where(game_votes.c.game_id.in_({old_id, new_id})))
			conn.execute(game_votes.insert(), [
				{
					"game_id": result_id,
					"show_id": show_id,
					"user_id": user_id,
					"vote": vote
				}
				for (show_id, user_id), vote in votes.items()
			])

	# Same pattern for per-show display data: cheap UPDATE first, Python-side
	# merge on key conflict.
	game_per_show_data = metadata.tables["game_per_show_data"]
	try:
		with conn.begin_nested():
			conn.execute(game_per_show_data.update()
				.where(game_per_show_data.c.game_id.in_({old_id, new_id})), {
				"game_id": result_id,
			})
	except sqlalchemy.exc.IntegrityError:
		with conn.begin_nested():
			res = conn.execute(sqlalchemy.select([
				game_per_show_data.c.show_id, game_per_show_data.c.display_name,
				game_per_show_data.c.verified
			]).where(game_per_show_data.c.game_id == old_id))

			data = {
				show_id: (display_name, verified)
				for show_id, display_name, verified in res
			}

			# BUG FIX: was `game_per_show_data.c.verifed` (typo), which raises
			# AttributeError whenever this fallback path runs.
			res = conn.execute(sqlalchemy.select([
				game_per_show_data.c.show_id, game_per_show_data.c.display_name,
				game_per_show_data.c.verified
			]).where(game_per_show_data.c.game_id == new_id))

			# Prefer `new_id`'s values, keeping `old_id`'s where absent.
			for show_id, new_display_name, new_verified in res:
				old_display_name, old_verified = data.get(show_id, (None, None))
				data[show_id] = (
					new_display_name or old_display_name,
					new_verified if new_verified is not None else old_verified
				)

			conn.execute(game_per_show_data.delete()
				.where(game_per_show_data.c.game_id.in_({old_id, new_id})))
			conn.execute(game_per_show_data.insert(), [
				{
					"game_id": result_id,
					"show_id": show_id,
					"display_name": display_name,
					"verified": verified,
				}
				for show_id, (display_name, verified) in data.items()
			])

	# Finally delete the merged-away game rows themselves.
	games = metadata.tables["games"]
	conn.execute(games.delete().where(games.c.id.in_({old_id, new_id} - {result_id})))

Example 29

Project: lrrbot
Source File: game_data.py
View license
def merge_games(conn, metadata, old_id, new_id, result_id):
	"""
	Merge games `old_id` and `new_id` into `result_id`, repointing quotes,
	summing per-show stats, and combining votes and per-show display data
	onto the surviving game row; the merged-away rows are then deleted.

	NOT THREADSAFE. Use `lock_tables` before calling `merge_games`. `old_id`, `new_id` and
	`result_id` must already exist in the database.
	"""

	# Nothing to do when both source games already are the result game.
	if old_id == result_id and new_id == result_id:
		return

	# Repoint quotes at the surviving game.
	quotes = metadata.tables["quotes"]
	conn.execute(quotes.update().where(quotes.c.game_id.in_({old_id, new_id})), {
		"game_id": result_id,
	})

	# Combine stats: sum both games' counts per (show, stat), upsert onto the
	# result game, then delete the stale source rows.
	game_stats = metadata.tables["game_stats"]
	query = insert(game_stats) \
		.from_select(["game_id", "show_id", "stat_id", "count"],
			sqlalchemy.select([result_id, game_stats.c.show_id, game_stats.c.stat_id, sqlalchemy.func.sum(game_stats.c.count)])
				.where((game_stats.c.game_id == old_id) | (game_stats.c.game_id == new_id))
				.group_by(game_stats.c.show_id, game_stats.c.stat_id)
		)
	query = query.on_conflict_do_update(
		index_elements=[game_stats.c.game_id, game_stats.c.show_id, game_stats.c.stat_id],
		set_={
			'count': query.excluded.count,
		}
	)
	conn.execute(query, {
		"result_id": result_id,
		"old_id": old_id,
		"new_id": new_id,
	})
	conn.execute(game_stats.delete().where(game_stats.c.game_id.in_({old_id, new_id} - {result_id})))

	# Move votes. Fast path: plain UPDATE. If a user voted on both games the
	# UPDATE violates the unique key, so fall back to merging in Python,
	# where `new_id`'s votes win over `old_id`'s.
	game_votes = metadata.tables["game_votes"]
	try:
		with conn.begin_nested():
			conn.execute(game_votes.update().where(game_votes.c.game_id.in_({old_id, new_id})), {
				"game_id": result_id,
			})
	except sqlalchemy.exc.IntegrityError:
		with conn.begin_nested():
			res = conn.execute(sqlalchemy.select([
				game_votes.c.show_id, game_votes.c.user_id, game_votes.c.vote
			]).where(game_votes.c.game_id == old_id))

			votes = {
				(show_id, user_id): vote
				for show_id, user_id, vote in res
			}

			res = conn.execute(sqlalchemy.select([
				game_votes.c.show_id, game_votes.c.user_id, game_votes.c.vote
			]).where(game_votes.c.game_id == new_id))

			# Votes on `new_id` override votes on `old_id`.
			votes.update({
				(show_id, user_id): vote
				for show_id, user_id, vote in res
			})

			conn.execute(game_votes.delete().where(game_votes.c.game_id.in_({old_id, new_id})))
			conn.execute(game_votes.insert(), [
				{
					"game_id": result_id,
					"show_id": show_id,
					"user_id": user_id,
					"vote": vote
				}
				for (show_id, user_id), vote in votes.items()
			])

	# Same pattern for per-show display data: cheap UPDATE first, Python-side
	# merge on key conflict.
	game_per_show_data = metadata.tables["game_per_show_data"]
	try:
		with conn.begin_nested():
			conn.execute(game_per_show_data.update()
				.where(game_per_show_data.c.game_id.in_({old_id, new_id})), {
				"game_id": result_id,
			})
	except sqlalchemy.exc.IntegrityError:
		with conn.begin_nested():
			res = conn.execute(sqlalchemy.select([
				game_per_show_data.c.show_id, game_per_show_data.c.display_name,
				game_per_show_data.c.verified
			]).where(game_per_show_data.c.game_id == old_id))

			data = {
				show_id: (display_name, verified)
				for show_id, display_name, verified in res
			}

			# BUG FIX: was `game_per_show_data.c.verifed` (typo), which raises
			# AttributeError whenever this fallback path runs.
			res = conn.execute(sqlalchemy.select([
				game_per_show_data.c.show_id, game_per_show_data.c.display_name,
				game_per_show_data.c.verified
			]).where(game_per_show_data.c.game_id == new_id))

			# Prefer `new_id`'s values, keeping `old_id`'s where absent.
			for show_id, new_display_name, new_verified in res:
				old_display_name, old_verified = data.get(show_id, (None, None))
				data[show_id] = (
					new_display_name or old_display_name,
					new_verified if new_verified is not None else old_verified
				)

			conn.execute(game_per_show_data.delete()
				.where(game_per_show_data.c.game_id.in_({old_id, new_id})))
			conn.execute(game_per_show_data.insert(), [
				{
					"game_id": result_id,
					"show_id": show_id,
					"display_name": display_name,
					"verified": verified,
				}
				for show_id, (display_name, verified) in data.items()
			])

	# Finally delete the merged-away game rows themselves.
	games = metadata.tables["games"]
	conn.execute(games.delete().where(games.c.id.in_({old_id, new_id} - {result_id})))

Example 30

View license
    def get_candidates(self, plugin, context, r_hd_binding_db):
        """Selection criteria: Hosting devices that...
               ... are based on the template required by router's type
               AND
               ... are administratively up
               AND
               ... are active (i.e., has status HD_ACTIVE)
               AND
               ... are bound to tenant owning router OR is unbound
               AND
               ... are enough slots available to host the router

            Among hosting devices meeting these criteria the device with
            less allocated slots is preferred.
        """
        # SELECT hosting_device_id, created_at, sum(num_allocated)
        # FROM hostingdevices AS hd
        # LEFT OUTER JOIN slotallocations AS sa ON hd.id=sa.hosting_device_id
        # WHERE
        #    hd.template_id='11111111-2222-3333-4444-555555555555' AND
        #    hd.admin_state_up=TRUE AND
        #    hd.status='ACTIVE' AND
        # <<<sharing case:>>>
        #    (hd.tenant_bound IS NULL OR hd.tenant_bound='t10')
        # <<<non-sharing case:>>>
        #    (sa.tenant_bound='t10' OR
        #     (sa.tenant_bound IS NULL AND sa.logical_resource_owner='t10') OR
        #     hd.tenant_bound='t10' OR
        #     (hd.tenant_bound IS NULL AND sa.hosting_device_id IS NULL))
        # GROUP BY hosting_device_id
        # HAVING sum(num_allocated) <= 8 OR sum(num_allocated) IS NULL
        # ORDER BY created_at;
        router = r_hd_binding_db.router
        tenant_id = router['tenant_id']
        router_type_db = r_hd_binding_db.router_type
        template_id = router_type_db['template_id']
        template_db = router_type_db.template
        # A device qualifies if, after adding this router's slot need, it
        # would still be within the template's slot capacity.
        slot_threshold = template_db.slot_capacity - router_type_db.slot_need

        query = context.session.query(
            hd_models.HostingDevice.id, hd_models.HostingDevice.created_at,
            func.sum(hd_models.SlotAllocation.num_allocated))
        # LEFT OUTER JOIN so devices with no allocations at all still appear
        # (their SUM is NULL, accepted by the HAVING clause below).
        query = query.outerjoin(
            hd_models.SlotAllocation,
            hd_models.HostingDevice.id ==
            hd_models.SlotAllocation.hosting_device_id)
        query = query.filter(
            hd_models.HostingDevice.template_id == template_id,
            hd_models.HostingDevice.admin_state_up == expr.true(),
            hd_models.HostingDevice.status == cisco_constants.HD_ACTIVE)
        if r_hd_binding_db.share_hosting_device:
            query = query.filter(
                expr.or_(hd_models.HostingDevice.tenant_bound == expr.null(),
                         hd_models.HostingDevice.tenant_bound == tenant_id))
        else:
            query = query.filter(
                expr.or_(
                    hd_models.SlotAllocation.tenant_bound == tenant_id,
                    expr.and_(
                        hd_models.SlotAllocation.tenant_bound == expr.null(),
                        hd_models.SlotAllocation.logical_resource_owner ==
                        tenant_id),
                    hd_models.HostingDevice.tenant_bound == tenant_id,
                    expr.and_(
                        hd_models.HostingDevice.tenant_bound == expr.null(),
                        hd_models.SlotAllocation.hosting_device_id ==
                        expr.null())))
        query = query.group_by(hd_models.HostingDevice.id)
        # BUG FIX: the NULL test previously sat *inside* the SUM —
        # func.sum(num_allocated == expr.null()) — which aggregates a boolean
        # instead of testing the aggregate. The intended SQL (see the comment
        # above) is: HAVING sum(num_allocated) <= threshold
        #            OR sum(num_allocated) IS NULL.
        query = query.having(
            expr.or_(
                func.sum(
                    hd_models.SlotAllocation.num_allocated) <= slot_threshold,
                func.sum(
                    hd_models.SlotAllocation.num_allocated) == expr.null()))
        query = query.order_by(hd_models.HostingDevice.created_at)
        return query.all()

Example 31

Project: maproulette
Source File: __init__.py
View license
    def get(self, challenge_slug=None, user_id=None):
        """Return aggregate task-status counts, optionally broken down by
        user or challenge (chosen from the request path) and optionally
        restricted to a challenge, a user, and/or a time window."""
        from dateutil import parser as dateparser
        from datetime import datetime
        from maproulette.models import AggregateMetrics

        window_start = None
        window_end = None

        parser = reqparse.RequestParser()
        parser.add_argument('start', type=str,
                            help='start datetime yyyymmddhhmm')
        parser.add_argument('end', type=str,
                            help='end datetime yyyymmddhhmm')
        args = parser.parse_args()

        # Base query: one row per status with the summed counts.
        columns = [
            AggregateMetrics.status,
            func.sum(AggregateMetrics.count)]
        grouping = [
            AggregateMetrics.status]

        # A trailing /users or /challenges requests a secondary breakdown.
        if request.path.endswith('/users'):
            breakdown_column = AggregateMetrics.user_name
        elif request.path.endswith('/challenges'):
            breakdown_column = AggregateMetrics.challenge_slug
        else:
            breakdown_column = None

        if breakdown_column is not None:
            columns.insert(0, breakdown_column)
            grouping.insert(0, breakdown_column)

        stats_query = db.session.query(*columns).group_by(*grouping)

        # Restrict to a specific challenge and/or user when given.
        if challenge_slug is not None:
            stats_query = stats_query.filter_by(challenge_slug=challenge_slug)
        if user_id is not None:
            stats_query = stats_query.filter_by(user_id=user_id)

        # Time slicing: a missing end defaults to "now".
        if args['start'] is not None:
            window_start = dateparser.parse(args['start'])
            if args['end'] is None:
                window_end = datetime.utcnow()
            else:
                window_end = dateparser.parse(args['end'])
            stats_query = stats_query.filter(
                AggregateMetrics.timestamp.between(window_start, window_end))

        if breakdown_column is not None:
            # Three columns were selected, so build a nested dictionary
            # keyed on the breakdown variable.
            return as_stats_dict(
                stats_query.all(), start=window_start, end=window_end)
        return dict(stats_query.all())

Example 32

Project: maproulette
Source File: __init__.py
View license
    def get(self, challenge_slug=None, user_id=None):
        """Return historical (time-series) task-status counts, optionally
        restricted to a challenge, a user, and/or a time window."""

        from maproulette.models import HistoricalMetrics as HM

        start = None
        end = None

        from dateutil import parser as dateparser
        from datetime import datetime
        parser = reqparse.RequestParser()
        parser.add_argument('start', type=str,
                            help='start datetime yyyymmddhhmm')
        parser.add_argument('end', type=str,
                            help='end datetime yyyymmddhhmm')

        args = parser.parse_args()

        # One row per (timestamp, status) with summed counts.
        stats_query = db.session.query(
            HM.timestamp,
            HM.status,
            func.sum(HM.count))

        if challenge_slug is not None:
            stats_query = stats_query.filter(HM.challenge_slug == challenge_slug)
        if user_id is not None:
            stats_query = stats_query.filter(HM.user_id == user_id)

        stats_query = stats_query.group_by(
            HM.timestamp, HM.status).order_by(
            HM.status)

        # Time slicing filters; a missing end defaults to "now".
        if args['start'] is not None:
            start = dateparser.parse(args['start'])
            if args['end'] is None:
                end = datetime.utcnow()
            else:
                end = dateparser.parse(args['end'])
            # BUG FIX: previously filtered on `Action.timestamp`, but `Action`
            # is not defined in this scope (NameError whenever a start time is
            # supplied); every other filter in this query uses HM.
            stats_query = stats_query.filter(
                HM.timestamp.between(start, end))

        return as_stats_dict(
            stats_query.all(),
            order=[1, 0, 2],
            start=start,
            end=end)