sqlalchemy.func.count.label

Here are code examples of the Python API `sqlalchemy.func.count.label`, taken from open source projects. By voting up, you can indicate which examples are most useful and appropriate.

35 Examples

Example 1

Project: bubbles Source File: ops.py
@duplicate_stats.register("sql_statement")
def _(ctx, obj, fields=None, threshold=1):
    """Return duplicate statistics of a `statement`.

    Re-aggregates the output of the `duplicates` operation so each
    distinct duplicate count maps to the number of records having it.
    """
    count_label = "__record_count"

    # Duplicate rows with their per-key counts, wrapped in an alias so
    # the count column can be grouped on.
    dups = duplicates(obj, threshold, count_label)
    dup_statement = dups.statement.alias("duplicates")

    record_counter = sqlalchemy.func.count("*").label("record_count")
    count_column = dup_statement.c[count_label]
    stat_select = sqlalchemy.sql.expression.select(
        [record_counter, count_column],
        from_obj=dup_statement,
        group_by=[count_column],
    )

    out_fields = dups.fields.clone()
    out_fields.add(count_label)

    return obj.clone_statement(statement=stat_select, fields=out_fields)

Example 2

Project: holmes-api Source File: domain.py
    @classmethod
    def get_violations_per_domain(cls, db):
        """Map each domain id to its violation count on active reviews."""
        from holmes.models import Review, Violation

        rows = (
            db.query(Review.domain_id, sa.func.count(Violation.id).label('count'))
            .filter(Violation.review_id == Review.id)
            .filter(Review.is_active == True)
            .group_by(Review.domain_id)
            .all()
        )

        # One entry per domain: {domain_id: violation_count}
        return {row.domain_id: row.count for row in rows}

Example 3

Project: holmes-api Source File: domain.py
    def get_violation_data(self, db):
        """Return the number of active violations recorded for this domain."""
        from holmes.models import Violation

        row = db.query(sa.func.count(Violation.id).label('count')).filter(
            Violation.domain_id == self.id,
            Violation.review_is_active == True,
        ).one()

        return row.count

Example 4

Project: holmes-api Source File: violation.py
    @classmethod
    def get_group_by_value_for_key(cls, db, key_name):
        """Return (value, count) pairs for violations of the named key.

        Only violations attached to active reviews are counted; results
        are grouped by violation value, most frequent first.
        """
        from holmes.models.keys import Key  # to avoid circular dependency
        from holmes.models.violation import Violation  # to avoid circular dependency

        return db \
            .query(
                Violation.value,
                sa.func.count(Violation.key_id).label('count')
            ) \
            .filter(Key.name == key_name) \
            .filter(Key.id == Violation.key_id) \
            .filter(Violation.review_is_active == True) \
            .group_by(Violation.value) \
            .order_by(sa.desc('count')) \
            .all()
            # sa.desc('count') replaces the textual 'count DESC', which is
            # deprecated since SQLAlchemy 1.0 and rejected in 1.4+.

Example 5

Project: indico Source File: controllers.py
    def _iter_allowed_rooms(self):
        """Yield ``(room, event_count)`` pairs for VC rooms matching the search.

        Only the ten rooms with the most event associations are queried;
        rooms the current user cannot manage are filtered out afterwards.
        """
        query = (db.session.query(VCRoom, func.count(VCRoomEventAssociation.id).label('event_count'))
                 .filter(func.lower(VCRoom.name).contains(self.query.lower()), VCRoom.status != VCRoomStatus.deleted,
                         VCRoom.type == self.plugin.service_name)
                 .join(VCRoomEventAssociation)
                 # Plugins might add eager-loaded extensions to the table - since we cannot group by them
                 # we need to make sure everything is lazy-loaded here.
                 # NOTE(review): a generator expression is passed as a single
                 # argument to .options() - confirm the SQLAlchemy version in
                 # use accepts this; otherwise it needs to be unpacked with *.
                 .options((lazyload(r) for r in inspect(VCRoom).relationships.keys()),
                          joinedload('events').joinedload('event_new').joinedload('acl_entries'))
                 .group_by(VCRoom.id)
                 .order_by(db.desc('event_count'))
                 .limit(10))

        # Permission check happens in Python, after the top-10 cut.
        return ((room, count) for room, count in query if room.plugin.can_manage_vc_room(session.user, room))

Example 6

Project: sqlalchemy-utils Source File: test_sort_query.py
    def test_calculated_value_ascending(self, session, Article, Category):
        """Sorting by a labelled aggregate orders ascending on the label."""
        articles_count = sa.func.count(Article.id).label('articles')
        base_query = session.query(Category, articles_count)
        sorted_query = sort_query(base_query, 'articles')
        assert_contains('ORDER BY articles ASC', sorted_query)

Example 7

Project: sqlalchemy-utils Source File: test_sort_query.py
    def test_calculated_value_descending(self, session, Article, Category):
        """A '-' prefix on the sort key orders descending on the label."""
        articles_count = sa.func.count(Article.id).label('articles')
        base_query = session.query(Category, articles_count)
        sorted_query = sort_query(base_query, '-articles')
        assert_contains('ORDER BY articles DESC', sorted_query)

Example 8

Project: networking-infoblox Source File: infoblox_db.py
def get_next_authority_member_for_ipam(session, grid_id):
    """Return the grid member best suited to be the next IPAM authority.

    Candidates are members of `grid_id` whose status is ON and whose type
    is not a regular member; they are ranked by how many network views
    they already own (ascending), so the least-loaded member wins.

    Returns None when no candidate matches.
    """
    q = (session.query(
         ib_models.InfobloxGridMember,
         ib_models.InfobloxGridMember.member_id,
         func.count(ib_models.InfobloxNetworkView.id).label('count')).
         outerjoin(ib_models.InfobloxNetworkView,
                   ib_models.InfobloxNetworkView.authority_member_id ==
                   ib_models.InfobloxGridMember.member_id).
         filter(ib_models.InfobloxGridMember.grid_id == grid_id,
                ib_models.InfobloxGridMember.member_status ==
                const.MEMBER_STATUS_ON,
                ib_models.InfobloxGridMember.member_type !=
                const.MEMBER_TYPE_REGULAR_MEMBER).
         group_by(ib_models.InfobloxNetworkView.authority_member_id).
         order_by('count'))
    res = q.first()
    if res is None:
        # Previously this crashed with a TypeError (subscripting None)
        # when no eligible member existed.
        return None
    return res[0]

Example 9

Project: pajbot Source File: pleblist.py
    def get(self):
        """Return pleblist songs grouped by video id, most-requested first."""
        with DBManager.create_session_scope() as session:
            play_count = func.count(PleblistSong.youtube_id).label('total')
            songs = (session.query(PleblistSong, play_count)
                     .group_by(PleblistSong.youtube_id)
                     .order_by('total DESC'))

            log.info(songs)
            log.info(songs.all())

            return pajbot.web.utils.jsonify_list(
                    'songs',
                    songs,
                    default_limit=50,
                    max_limit=500,
                    base_url=url_for(self.endpoint, _external=True),
                    jsonify_method=jsonify_query,
                    )

Example 10

Project: pygotham Source File: models.py
    def __iter__(self):
        """Iterate over the schedule for the day."""
        if not self.rooms:
            # PEP 479 (Python 3.7+): raising StopIteration inside a
            # generator is converted into a RuntimeError. A bare return
            # is the correct way to end the generator early and behaves
            # identically on older versions.
            return

        def rowspan(start, end):
            """Find the rowspan for an entry in the schedule table.

            This uses a binary search for the given end time from a
            sorted list of start times in order to find the index of the
            first start time that occurs after the given end time. This
            method is used to prevent issues that can occur with
            overlapping start and end times being included in the same
            list.
            """
            return bisect_left(times, end) - times.index(start)

        times = sorted({slot.start for slot in self.slots})
        # While we typically only care about the start times here, the
        # list is iterated over two items at a time. Without adding a
        # final element, the last time slot would be omitted. Any value
        # could be used here as bisect_left only assumes the list is
        # sorted, but using a meaningful value feels better.
        times.append(self.slots[-1].end)

        # One row per slot with its room count and minimum room order;
        # ordering puts slots spanning more rooms (and earlier rooms) first.
        slots = db.session.query(
            Slot.id,
            Slot.content_override,
            Slot.kind,
            Slot.start,
            Slot.end,
            func.count(rooms_slots.c.slot_id).label('room_count'),
            func.min(Room.order).label('order'),
        ).join(rooms_slots, Room).filter(Slot.day == self).order_by(
            func.count(rooms_slots.c.slot_id), func.min(Room.order)
        ).group_by(
            Slot.id, Slot.content_override, Slot.kind, Slot.start, Slot.end
        ).all()

        for time, next_time in pairwise(times):
            row = {'time': time, 'slots': []}
            for slot in slots:
                if slot.start == time:
                    slot.rowspan = rowspan(slot.start, slot.end)
                    slot.colspan = slot.room_count
                    if not slot.content_override:
                        slot.presentation = Presentation.query.filter(
                            Presentation.slot_id == slot.id).first()
                    row['slots'].append(slot)
            if row['slots'] or next_time is None:
                yield row

Example 11

Project: Snoopy Source File: main.py
@app.route('/client/list', methods=['POST'])
@login_required
def client_list_json():
    """Return a JSON list of probed clients for the requested monitor.

    Each entry carries the device MAC and the number of distinct
    proximity sessions observed for it. A monitor of '*' means all
    monitors.
    """
    # dict.has_key() was removed in Python 3; the `in` test is equivalent
    # and works on both Python 2 and 3.
    if 'monitor' not in request.form:
        logging.error('No monitor specified. This should not happen.')
        return jsonify(success=True, clients=[])
    monitor = request.form['monitor']
    try:
        with db.SessionCtx() as session:
            clients = session.query(
                db.Probe.device_mac,
                func.count(distinct(db.Probe.proximity_session)).label('cnt')
            )
            if monitor != '*':
                clients = clients.filter_by(monitor=monitor)
            clients = clients.group_by(db.Probe.device_mac)
            clients = clients.order_by('cnt DESC').all()
            clients = [{'mac': c[0], 'n_sessions': c[1]} for c in clients]
            return jsonify(success=True, clients=clients)
    except Exception:
        # Broad catch is deliberate: this is a request boundary; the
        # exception is logged with traceback before a generic error reply.
        logging.exception('Error getting probed device list:')
        return jsonify(success=False, errors=['Internal error'])

Example 12

Project: baruwa2 Source File: pdfreport.py
    def command(self):
        """Build and email per-user PDF usage reports.

        For every active user who opted into reports, render the
        "top N" tables and a daily message-totals chart into a PDF and
        send it through the configured mailer. Note: this module uses
        Python 2 syntax (except-comma, print >>) throughout.
        """
        self.init()

        import baruwa
        pkgname = 'baruwa'
        # Locate the installed package so templates, images and i18n
        # catalogs can be resolved relative to it.
        here = os.path.dirname(
                    os.path.dirname(os.path.abspath(baruwa.__file__))
                )
        path = os.path.join(here, 'baruwa', 'templates')
        logo = os.path.join(here, 'baruwa', 'public', 'imgs', 'logo.png')
        localedir = os.path.join(here, 'baruwa', 'i18n')
        cache_dir = os.path.join(self.conf['cache_dir'], 'templates')
        mako_lookup = TemplateLookup(
                        directories=[path],
                        error_handler=handle_mako_error,
                        module_directory=cache_dir,
                        input_encoding='utf-8',
                        default_filters=['escape'],
                        output_encoding='utf-8',
                        encoding_errors='replace',
                        imports=['from webhelpers.html import escape']
                        )

        mailer = Mailer(get_conf_options(self.conf))
        mailer.start()
        # Only active users who asked for reports receive one.
        users = Session\
                .query(User)\
                .filter(User.active == True)\
                .filter(User.send_report == True).all()
        #localedir = os.path.join(self.conf['here'], 'baruwa', 'i18n')
        for user in users:
            host_url = self.conf['baruwa.default.url']
            # sentry counts report sections added; nothing is mailed when 0.
            sentry = 0
            language = 'en'
            if user.is_domain_admin:
                # Domain admins take URL/language from the first active
                # domain of their organizations.
                orgs = [group.id for group in user.organizations]
                domains = Session\
                        .query(Domain.site_url, Domain.language)\
                        .join(domain_owners)\
                        .filter(Domain.status == True)\
                        .filter(domain_owners.c.organization_id.in_(orgs))\
                        .all()
                if domains:
                    host_url, language = domains.pop(0)
            if user.is_peleb:
                domains = [(domain.site_url, domain.language)
                            for domain in user.domains
                            if domain.status == True]
                if domains:
                    host_url, language = domains.pop(0)
            # If the first pick was English, scan the remaining domains
            # for a non-English language to use instead.
            if language == 'en' and 'domains' in locals() and domains:
                while domains:
                    _, language = domains.pop(0)
                    if language != 'en':
                        break
            translator = set_lang(language, pkgname, localedir)
            _ = translator.ugettext
            # Report catalogue: id -> address field, sort key and title.
            reports = {
                        '1': {'address': 'from_address', 'sort': 'count',
                            'title': _('Top Senders by Quantity')},
                        '2': {'address': 'from_address', 'sort': 'size',
                            'title': _('Top Senders by Volume')},
                        '3': {'address': 'from_domain', 'sort': 'count',
                            'title': _('Top Sender Domains by Quantity')},
                        '4': {'address': 'from_domain', 'sort': 'size',
                            'title': _('Top Sender Domains by Volume')},
                        '5': {'address': 'to_address', 'sort': 'count',
                            'title': _('Top Recipients by Quantity')},
                        '6': {'address': 'to_address', 'sort': 'size',
                            'title': _('Top Recipients by Volume')},
                        '7': {'address': 'to_domain', 'sort': 'count',
                            'title': _('Top Recipient Domains By Quantity')},
                        '8': {'address': 'to_domain', 'sort': 'size',
                            'title': _('Top Recipient Domains By Volume')},
                        '9': {'address': '', 'sort': '',
                            'title': _('Spam Score distribution')},
                        '10': {'address': 'clientip', 'sort': 'count',
                            'title': _('Top mail hosts by quantity')},
                        '11': {'address': '', 'sort': '',
                            'title': _('Total messages [ After SMTP ]')}
                        }
            pieheadings = ('', _('Address'), _('Count'), _('Volume'), '')
            totalsheaders = dict(date=_('Date'), mail=_('Mail totals'),
                            spam=_('Spam totals'), virus=_('Virus totals'),
                            volume=_('Mail volume'), totals=_('Totals'))
            pdfcreator = PDFReport(logo, _('Baruwa mail report'))
            for reportid in ['1', '2', '3', '4', '5', '6', '7', '8', '10']:
                sortby = reports[reportid]['sort']
                # account_type 3 users are skipped for reports 7/8
                # (recipient-domain reports).
                if user.account_type == 3 and reportid in ['7', '8']:
                    data = None
                else:
                    query = ReportQuery(user, reportid)
                    if int(self.options.days) > 0:
                        # Restrict the report window to the last N days.
                        a_day = datetime.timedelta(days=self.options.days)
                        startdate = now().date() - a_day
                        query = query.get().filter(Message.timestamp >
                                str(startdate))
                        data = query[:10]
                    else:
                        data = query.get()[:10]
                if data:
                    sentry += 1
                    pdfcreator.add(data, reports[reportid]['title'],
                                pieheadings, sortby)
            # Daily totals per date: mail count, virus count, spam count
            # (virus takes precedence over spam) and total volume.
            query = Session.query(Message.date,
                                func.count(Message.date).label('mail_total'),
                                func.sum(case([(Message.virusinfected > 0, 1)],
                                else_=0)).label('virus_total'),
                                func.sum(case([(and_(Message.virusinfected ==
                                0, Message.spam > 0), 1)], else_=0))\
                                .label('spam_total'), func.sum(Message.size)\
                                .label('total_size')).group_by(Message.date)\
                                .order_by(desc(Message.date))
            uquery = UserFilter(Session, user, query)
            query = uquery.filter()
            data = query.all()
            if data:
                if not sentry:
                    sentry += 1
                pdfcreator.add(data, _('Message Totals'), totalsheaders,
                                chart='bar')
            if sentry:
                # At least one section produced: render the cover text,
                # build the PDF and send it.
                template = mako_lookup.get_template('/email/pdfreports.txt')
                text = template.render(user=user, url=host_url)
                displayname = '%s %s' % (user.firstname or '',
                                        user.lastname or '')
                email = Msg(author=[(_('Baruwa Reports'),
                                self.conf['baruwa.reports.sender'])],
                                to=[(displayname, user.email)],
                                subject=_('Baruwa usage report'))
                email.plain = text
                pdf_file = pdfcreator.build()
                email.attach('baruwa-reports.pdf',
                            data=pdf_file,
                            maintype='application',
                            subtype='pdf')
                try:
                    mailer.send(email)
                # Python 2 except syntax; failures are reported per user
                # without aborting the whole run.
                except (TransportFailedException, MessageFailedException), err:
                    print >> sys.stderr, ("Error sending to: %s, Error: %s" %
                            (user.email, err))
        mailer.stop()

Example 13

Project: baruwa2 Source File: pdfreportsng.py
def domain_pie_query(domain, reportid, num_of_days=0):
    """Run the "top 10" query for `domain` and report `reportid`.

    Returns up to ten (address, count, size) rows grouped by the
    report's address field and ordered by its sort column, optionally
    restricted to the last `num_of_days` days.
    """
    queryfield = getattr(Message, REPORTS[reportid]['address'])
    orderby = REPORTS[reportid]['sort']
    query = Session.query(queryfield.label('address'),
                        func.count(queryfield).label('count'),
                        func.sum(Message.size).label('size'))
    if reportid == '10':
        # Relay (client IP) report: exclude the local host.
        query = query.filter(queryfield != u'127.0.0.1')\
                        .group_by(queryfield)\
                        .order_by(desc(orderby))
    else:
        # Address reports: exclude empty addresses.
        query = query.filter(queryfield != u'')\
                        .group_by(queryfield)\
                        .order_by(desc(orderby))
    if reportid in ['5', '6', '7', '8']:
        # Recipient reports only match mail delivered to the domain.
        query = query.filter(Message.to_domain == domain)
    else:
        # NOTE(review): func._(...) renders a SQL function literally named
        # "_" around the OR clause - confirm this is intentional rather
        # than a stray wrapper for filter(or_(...)).
        query = query.filter(func._(or_(Message.from_domain == domain,
                                        Message.to_domain == domain)))
    if int(num_of_days) > 0:
        # Limit to the trailing window ending now (UTC).
        numofdays = datetime.timedelta(days=num_of_days)
        current_time = arrow.utcnow()
        startdate = current_time - numofdays
        query = query.filter(between(Message.timestamp,
                                    startdate.datetime,
                                    current_time.datetime))
    data = query[:10]
    return data

Example 14

Project: baruwa2 Source File: query.py
    def __init__(self, dbsession, user):
        """Build today's message-status totals query.

        Every message in the user's current day (timezone-aware window)
        is classified into mutually exclusive buckets which are summed:
        clean, virii, infected, spam, lowspam and highspam.
        """
        self.dbsession = dbsession
        self.user = user
        self.query = self.dbsession.query(
            func.count(Message.id).label('total'),
            # clean: no infection flags and no spam flags of any kind.
            func.sum(case([(and_(Message.virusinfected == 0,
                Message.nameinfected == 0, Message.otherinfected == 0,
                Message.spam == 0, Message.highspam == 0), 1)],
                else_=0)).label('clean'),
            # virii: anything the virus scanner flagged.
            func.sum(case([(Message.virusinfected > 0, 1)],
                else_=0)).label('virii'),
            # infected: name/other infection without virus or spam flags.
            func.sum(case([(and_(Message.highspam == 0,
                Message.spam == 0, Message.virusinfected == 0,
                or_(Message.nameinfected > 0, Message.otherinfected > 0)), 1)],
                else_=0)).label('infected'),
            # spam: any spam flag, provided nothing is infected.
            func.sum(case([(and_(Message.virusinfected == 0,
                Message.otherinfected == 0, Message.nameinfected == 0,
                or_(Message.spam > 0, Message.highspam > 0)), 1)],
                else_=0)).label('spam'),
            # lowspam: spam that did not reach the high-spam threshold.
            func.sum(case([(and_(Message.virusinfected == 0,
                Message.otherinfected == 0, Message.nameinfected == 0,
                Message.spam > 0, Message.highspam == 0), 1)],
                else_=0)).label('lowspam'),
            # highspam: high-scoring spam.
            func.sum(case([(and_(Message.virusinfected == 0,
                Message.otherinfected == 0, Message.nameinfected == 0,
                Message.highspam > 0), 1)],
                else_=0)).label('highspam'))\
                .filter(Message.timestamp.between(
                        ustartday(self.user.timezone),
                        uendday(self.user.timezone)))

Example 15

Project: baruwa2 Source File: query.py
    def __init__(self, user, reportid, filters=None):
        """Prepare the report query for `user` and report `reportid`.

        Superadmins viewing domain reports ('3', '4', '7', '8') without
        filters read from the pre-aggregated totals tables; all other
        cases aggregate the Message table directly. The query is then
        narrowed by the appropriate user/aggregate filter.
        """
        self.dbsession = Session
        self.user = user
        self.reportid = reportid
        self.model = None
        self.isaggr = False
        self.filters = filters

        queryfield = getattr(Message, REPORTS[self.reportid]['address'])
        orderby = REPORTS[reportid]['sort']
        if (self.reportid in ['3', '4', '7', '8']
            and self.user.is_superadmin
            and not self.filters):
            # domains
            self.isaggr = True
            if self.reportid in ['3', '4']:
                # src
                self.model = SrcMessageTotals
                self.query = self.dbsession\
                            .query(SrcMessageTotals.id.label('address'),
                            SrcMessageTotals.total.label('count'),
                            SrcMessageTotals.volume.label('size'))\
                            .order_by(desc(orderby))
            else:
                # dst
                self.model = DstMessageTotals
                self.query = self.dbsession\
                            .query(DstMessageTotals.id.label('address'),
                            DstMessageTotals.total.label('count'),
                            DstMessageTotals.volume.label('size'))\
                            .order_by(desc(orderby))
        else:
            # emails & relays
            self.query = self.dbsession.query(queryfield.label('address'),
                                    func.count(queryfield).label('count'),
                                    func.sum(Message.size).label('size'))
            if self.reportid != '10':
                # Address reports: drop empty addresses.
                self.query = self.query.filter(queryfield != u'')\
                            .group_by(queryfield).order_by(desc(orderby))
            else:
                # Relay report: drop localhost.
                self.query = self.query.filter(queryfield != u'127.0.0.1')\
                            .group_by(queryfield).order_by(desc(orderby))
        if self.isaggr:
            uquery = AggrFilter(self.query)
        else:
            uquery = UserFilter(self.dbsession,
                                self.user,
                                self.query)
        if self.reportid not in ['5', '6', '7', '8']:
            self.query = uquery()
        if self.reportid in ['5', '6', '7', '8']:
            # Recipient reports: restrict by inbound direction or, for
            # superadmins, to locally hosted (active) domains.
            if not self.user.is_superadmin:
                uquery.setdirection('in')
                self.query = uquery()
            else:
                flf = self.model.id if self.isaggr else Message.to_domain
                self.query = self.query.filter(flf
                            .in_(self.dbsession.query(Domain.name)
                            .filter(Domain.status == true())))

Example 16

Project: bubbles Source File: ops.py
@count_duplicates.register("sql")
def _(ctx, obj, keys=None, threshold=1,
                       record_count_label="record_count"):
    """Returns duplicate rows based on `keys`. `threshold` is lowest number of
    duplicates that has to be present to be returned. By default `threshold`
    is 1. If no keys are specified, then all columns are considered."""

    if not threshold or threshold < 1:
        # Fixed typo in the user-facing message ("duplcate" -> "duplicate").
        raise ValueError("Threshold should be at least 1 "
                         "meaning 'at least one duplicate'.")

    statement = obj.sql_statement()

    # Field metadata for the appended duplicate-count column.
    count_field = Field(record_count_label, "integer")

    if keys:
        keys = prepare_key(keys)
        group = [statement.c[str(field)] for field in keys]
        fields = list(keys)
        fields.append(count_field)
        out_fields = FieldList(*fields)
    else:
        # No keys given: group by every column of the statement.
        group = list(statement.columns)
        out_fields = obj.fields.clone() + FieldList(count_field)

    counter = sqlalchemy.func.count("*").label(record_count_label)
    selection = group + [counter]
    # Keep only groups occurring strictly more than `threshold` times,
    # i.e. with at least `threshold` duplicates beyond the original row.
    condition = counter > threshold

    statement = sql.expression.select(selection,
                                   from_obj=statement,
                                   group_by=group,
                                   having=condition)

    result = obj.clone_statement(statement=statement, fields=out_fields)
    return result

Example 17

Project: bubbles Source File: ops.py
@assert_unique.register("sql")
def _(ctx, obj, key=None):
    """Checks whether the receiver has unique values for `key`. If `key` is
    not specified, then all fields from `obj` are considered."""

    aliased = obj.sql_statement().alias("__u")

    # Columns to group on: the requested key fields, or every column.
    if key:
        grouping = [aliased.c[field] for field in prepare_key(key)]
    else:
        grouping = list(aliased.columns)

    dup_counter = sqlalchemy.func.count("*").label("duplicate_count")

    # A single matching group is enough to prove non-uniqueness.
    probe = sql.expression.select([dup_counter],
                                  from_obj=aliased,
                                  group_by=grouping,
                                  having=dup_counter > 1,
                                  limit=1)

    if list(obj.store.execute(probe)):
        raise ProbeAssertionError

    return obj

Example 18

Project: cobra Source File: route.py
@web.route('/report/<int:project_id>', methods=['GET'])
def report(project_id):
    """Render the scan report page for one project.

    The optional ``search_task`` query argument selects a specific task;
    the values "all" / "" (or its absence) mean "use the project's latest
    task and aggregate results over the whole project".
    """
    # Task id to search for
    search_task_id = request.args.get("search_task", "")
    search_task_id = None if search_task_id == "all" or search_task_id == "" else search_task_id
    # Check that the project id (and, below, the task id) exist.
    # Fetch the information related to this project id.
    project_info = CobraProjects.query.filter(CobraProjects.id == project_id).first()
    if project_info is None:
        # Unknown project id
        abort(404)
    # Fetch the task information
    if search_task_id is None:
        # No task id given: use the project's latest task for the basic task info
        task_info = CobraTaskInfo.query.filter(
            CobraTaskInfo.target == project_info.repository
        ).order_by(CobraTaskInfo.id.desc()).first()
    else:
        # A task id was given: load that task directly
        task_info = CobraTaskInfo.query.filter(CobraTaskInfo.id == search_task_id).first()

    # Bail out when no task info could be found
    if task_info is None:
        abort(404)

    # Extract some display fields from the task info
    # (the literal below means "counting..." and is shown while the
    # line count is not yet available)
    code_number = u"统计中..." \
        if task_info.code_number is None or task_info.code_number == 0 \
        else common.convert_number(task_info.code_number)

    # timestamp -> datetime
    time_start = time.strftime("%H:%M:%S", time.localtime(task_info.time_start))
    time_end = time.strftime("%H:%M:%S", time.localtime(task_info.time_end))

    # All tasks for this project (feeds the task selector)
    tasks = CobraTaskInfo.query.filter_by(target=project_info.repository).order_by(CobraTaskInfo.updated_at.desc()).all()

    # No task id specified: collect the scan results of the whole project.
    # Task id specified: restrict the results to that task.
    if search_task_id is None:
        # Default task id
        search_task_id = tasks[0].id

        # Total number of vulnerabilities
        scan_results_number = CobraResults.query.filter(CobraResults.project_id == project_id).count()
        # scan_results_number = db.session.query(func.count()).filter(CobraResults.project_id == project_id)
        # Number of vulnerabilities still to be fixed
        unrepair_results_number = CobraResults.query.filter(
            CobraResults.project_id == project_id, CobraResults.status < 2
        ).count()
        # Number of fixed vulnerabilities
        repaired_results_number = CobraResults.query.filter(
            CobraResults.project_id == project_id, CobraResults.status == 2
        ).count()
        # Vulnerability types that still need fixing
        showed_vul_type = db.session.query(
            func.count().label("showed_vul_number"), CobraVuls.name, CobraVuls.id
        ).filter(
            and_(
                CobraResults.project_id == project_id,
                CobraResults.rule_id == CobraRules.id,
                CobraResults.status < 2,
                CobraVuls.id == CobraRules.vul_id
            )
        ).group_by(CobraVuls.name, CobraVuls.id).all()
        # Rule types that still need fixing
        showed_rule_type = db.session.query(CobraRules.description, CobraRules.id).filter(
            and_(
                CobraResults.project_id == project_id,
                CobraResults.rule_id == CobraRules.id,
                CobraResults.status < 2,
                CobraVuls.id == CobraRules.vul_id
            )
        ).group_by(CobraRules.id).all()
        # Number of *fixed* vulnerabilities per severity level
        showed_repaired_level_number = db.session.query(
            func.count().label('vuln_number'), CobraRules.level
        ).filter(
            and_(
                CobraResults.project_id == project_id,
                CobraResults.rule_id == CobraRules.id,
                CobraResults.status == 2,
                CobraVuls.id == CobraRules.vul_id,
            )
        ).group_by(CobraRules.level).all()
        # Number of *unfixed* vulnerabilities per severity level
        showed_unrepair_level_number = db.session.query(
            func.count().label('vuln_number'), CobraRules.level
        ).filter(
            and_(
                CobraResults.project_id == project_id,
                CobraResults.rule_id == CobraRules.id,
                CobraResults.status < 2,
                CobraVuls.id == CobraRules.vul_id,
            )
        ).group_by(CobraRules.level).all()
        # Total number of vulnerabilities per severity level
        showed_level_number = db.session.query(
            func.count().label('vuln_number'), CobraRules.level
        ).filter(
            and_(
                CobraResults.project_id == project_id,
                CobraResults.rule_id == CobraRules.id,
                CobraVuls.id == CobraRules.vul_id,
            )
        ).group_by(CobraRules.level).all()
    else:
        # A task id was given: use only that task's results.
        # Total number of vulnerabilities
        scan_results_number = CobraResults.query.filter(
            CobraResults.task_id == search_task_id
        ).count()
        # Number of vulnerabilities still to be fixed
        unrepair_results_number = CobraResults.query.filter(
            CobraResults.task_id == search_task_id, CobraResults.status < 2
        ).count()
        # Number of fixed vulnerabilities
        repaired_results_number = CobraResults.query.filter(
            CobraResults.task_id == search_task_id, CobraResults.status == 2
        ).count()
        # Vulnerability types that still need fixing
        showed_vul_type = db.session.query(
            func.count().label("showed_vul_number"), CobraVuls.name, CobraVuls.id
        ).filter(
            and_(
                CobraResults.task_id == search_task_id,
                CobraResults.rule_id == CobraRules.id,
                CobraResults.status < 2,
                CobraVuls.id == CobraRules.vul_id
            )
        ).group_by(CobraVuls.name, CobraVuls.id).all()
        # Rule types that still need fixing
        showed_rule_type = db.session.query(CobraRules.description, CobraRules.id).filter(
            and_(
                CobraResults.task_id == search_task_id,
                CobraResults.rule_id == CobraRules.id,
                CobraResults.status < 2,
                CobraVuls.id == CobraRules.vul_id
            )
        ).group_by(CobraRules.id).all()
        # Number of *fixed* vulnerabilities per severity level
        showed_repaired_level_number = db.session.query(
            func.count().label('vuln_number'), CobraRules.level
        ).filter(
            and_(
                CobraResults.task_id == search_task_id,
                CobraResults.rule_id == CobraRules.id,
                CobraResults.status == 2,
                CobraVuls.id == CobraRules.vul_id,
            )
        ).group_by(CobraRules.level).all()
        # Number of *unfixed* vulnerabilities per severity level
        showed_unrepair_level_number = db.session.query(
            func.count().label('vuln_number'), CobraRules.level
        ).filter(
            and_(
                CobraResults.task_id == search_task_id,
                CobraResults.rule_id == CobraRules.id,
                CobraResults.status < 2,
                CobraVuls.id == CobraRules.vul_id,
            )
        ).group_by(CobraRules.level).all()
        # Total number of vulnerabilities per severity level
        showed_level_number = db.session.query(
            func.count().label('vuln_number'), CobraRules.level
        ).filter(
            and_(
                CobraResults.task_id == search_task_id,
                CobraResults.rule_id == CobraRules.id,
                CobraVuls.id == CobraRules.vul_id,
            )
        ).group_by(CobraRules.level).all()

    # Options for the vulnerability-type filter dropdown
    select_vul_type = list()
    # Per-type vulnerability counts (for the chart)
    chart_vuls_number = list()
    for r in showed_vul_type:
        select_vul_type.append([r[1], r[2]])
        chart_vuls_number.append({"vuls_name": r[1], "vuls_number": r[0]})
    select_rule_type = list()
    for r in showed_rule_type:
        select_rule_type.append([r[0], r[1]])
    # Aggregate vulnerability counts per severity level.
    # 1 - low, 2 - medium, 3 - high, any other value - undefined
    # Totals
    low_level_number = medium_level_number = high_level_number = unknown_level_number = 0
    for every_level in showed_level_number:
        if every_level[1] == 1:
            low_level_number = every_level[0]
        elif every_level[1] == 2:
            medium_level_number = every_level[0]
        elif every_level[1] == 3:
            high_level_number = every_level[0]
        else:
            unknown_level_number = every_level[0]
    # Fixed counts
    repaired_low_level_number = repaired_medium_level_number = repaired_high_level_number = repaired_unknown_level_number = 0
    for every_level in showed_repaired_level_number:
        if every_level[1] == 1:
            repaired_low_level_number = every_level[0]
        elif every_level[1] == 2:
            repaired_medium_level_number = every_level[0]
        elif every_level[1] == 3:
            repaired_high_level_number = every_level[0]
        else:
            repaired_unknown_level_number = every_level[0]
    # Unfixed counts
    unrepair_low_level_number = unrepair_medium_level_number = unrepair_high_level_number = unrepair_unknown_level_number = 0
    for every_level in showed_unrepair_level_number:
        if every_level[1] == 1:
            unrepair_low_level_number = every_level[0]
        elif every_level[1] == 2:
            unrepair_medium_level_number = every_level[0]
        elif every_level[1] == 3:
            unrepair_high_level_number = every_level[0]
        else:
            unrepair_unknown_level_number = every_level[0]

    # Vulnerability status options (for the status filter)
    vuls_status = [
        {"status": "All", "value": 0},
        {"status": "Fixed", "value": 1},
        {"status": "Not fixed", "value": 2},
        {"status": "Other", "value": 3},
    ]

    data = {
        "project_id": project_id,
        "task_id": search_task_id,
        "select_vul_type": select_vul_type,
        "select_rule_type": select_rule_type,
        "chart_vuls_number": chart_vuls_number,
        "task_info": task_info,
        "project_info": project_info,
        "code_number": code_number,
        "file_count": common.convert_number(task_info.file_count),
        "tasks": tasks,
        "vuls_status": vuls_status,
        "task_time": {
            "time_start": time_start,
            "time_end": time_end,
            "time_consume": common.convert_time(task_info.time_consume)
        },
        "vuls_number": {
            "unrepair": {
                "low": unrepair_low_level_number,
                "medium": unrepair_medium_level_number,
                "high": unrepair_high_level_number,
                "unknown": unrepair_unknown_level_number,
            },
            "repaired": {
                "low": repaired_low_level_number,
                "medium": repaired_medium_level_number,
                "high": repaired_high_level_number,
                "unknown": repaired_unknown_level_number,
            },
            "total_number": {
                "low": low_level_number,
                "medium": medium_level_number,
                "high": high_level_number,
                "unknown": unknown_level_number
            },
            "result_number": {
                "scan_result_number": scan_results_number,
                "repaired_result_number": repaired_results_number,
                "unrepair_result_number": unrepair_results_number,
            }
        },
    }
    return render_template('report.html', data=data)

Example 19

Project: sqlalchemy Source File: test_baked.py
    def test_subquery_eagerloading(self):
        """Exercise baked queries combined with subquery eager loading.

        Runs every combination of (filter variant x extra join) several
        times against a deliberately undersized bakery so cache evictions
        occur, then verifies both the loaded results and the number of SQL
        statements emitted.
        """
        User = self.classes.User
        Address = self.classes.Address
        Order = self.classes.Order

        # Override the default bakery for one with a smaller size. This used to
        # trigger a bug when unbaking subqueries.
        self.bakery = baked.bakery(size=3)
        base_bq = self.bakery(lambda s: s.query(User))

        base_bq += lambda q: q.options(subqueryload(User.addresses),
                                       subqueryload(User.orders))
        base_bq += lambda q: q.order_by(User.id)

        # Expected full result set before any extra filtering is applied.
        assert_result = [
            User(id=7,
                addresses=[Address(id=1, email_address='[email protected]')],
                orders=[Order(id=1), Order(id=3), Order(id=5)]),
            User(id=8, addresses=[
                Address(id=2, email_address='[email protected]'),
                Address(id=3, email_address='[email protected]'),
                Address(id=4, email_address='[email protected]'),
            ]),
            User(id=9,
                addresses=[Address(id=5)],
                orders=[Order(id=2), Order(id=4)]),
            User(id=10, addresses=[])
        ]

        # Repeat so that later iterations hit (and evict from) the cache.
        for i in range(4):
            for cond1, cond2 in itertools.product(
                    *[(False, True) for j in range(2)]):
                bq = base_bq._clone()

                sess = Session()

                # cond1 selects which name filter is appended.
                if cond1:
                    bq += lambda q: q.filter(User.name == 'jack')
                else:
                    bq += lambda q: q.filter(User.name.like('%ed%'))

                # cond2 additionally joins a "more than 2 addresses" subquery.
                if cond2:
                    ct = func.count(Address.id).label('count')
                    subq = sess.query(
                        ct,
                        Address.user_id).group_by(Address.user_id).\
                        having(ct > 2).subquery()

                    bq += lambda q: q.join(subq)

                # Statement counts: 1 when the main query returns no rows
                # (no eager loads fire), otherwise 1 + one per subqueryload.
                if cond2:
                    if cond1:
                        def go():
                            result = bq(sess).all()
                            eq_([], result)
                        self.assert_sql_count(testing.db, go, 1)
                    else:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[1:2], result)
                        self.assert_sql_count(testing.db, go, 3)
                else:
                    if cond1:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[0:1], result)
                        self.assert_sql_count(testing.db, go, 3)
                    else:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[1:3], result)
                        self.assert_sql_count(testing.db, go, 3)

                sess.close()

Example 20

Project: sqlalchemy Source File: test_baked.py
    def _test_baked_lazy_loading(self, set_option):
        """Shared driver for baked lazy-loading tests.

        :param set_option: when True, explicitly apply the
            ``baked_lazyload`` option for ``User.addresses``; when False,
            rely on the default loading configuration.

        Runs every combination of (filter variant x extra join) several
        times and verifies both the results and the SQL statement counts.
        """
        User, Address = self.classes.User, self.classes.Address

        base_bq = self.bakery(
            lambda s: s.query(User))

        if set_option:
            base_bq += lambda q: q.options(baked_lazyload(User.addresses))

        base_bq += lambda q: q.order_by(User.id)

        assert_result = self.static.user_address_result

        # Repeat so later iterations exercise the baked-query cache.
        for i in range(4):
            for cond1, cond2 in itertools.product(
                    *[(False, True) for j in range(2)]):
                bq = base_bq._clone()

                sess = Session()

                # cond1 selects which name filter is appended.
                if cond1:
                    bq += lambda q: q.filter(User.name == 'jack')
                else:
                    bq += lambda q: q.filter(User.name.like('%ed%'))

                # cond2 additionally joins a "more than 2 addresses" subquery.
                if cond2:
                    ct = func.count(Address.id).label('count')
                    subq = sess.query(
                        ct,
                        Address.user_id).group_by(Address.user_id).\
                        having(ct > 2).subquery()

                    bq += lambda q: q.join(subq)

                # Statement counts: 1 for the main query plus one lazy load
                # per matched user that has addresses.
                if cond2:
                    if cond1:
                        def go():
                            result = bq(sess).all()
                            eq_([], result)
                        self.assert_sql_count(testing.db, go, 1)
                    else:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[1:2], result)
                        self.assert_sql_count(testing.db, go, 2)
                else:
                    if cond1:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[0:1], result)
                        self.assert_sql_count(testing.db, go, 2)
                    else:
                        def go():
                            result = bq(sess).all()
                            eq_(assert_result[1:3], result)
                        self.assert_sql_count(testing.db, go, 3)

                sess.close()

Example 21

Project: lagesonum Source File: bottle_app.py
@route('/display')
@view('views/display')
def display():
    """Render recently entered numbers, merging verified and unverified ones."""
    cutoff = datetime.datetime.combine(
        datetime.date.today() - datetime.timedelta(days=MAX_DAYS),
        datetime.datetime.min.time())

    # TODO optimize query, so we don't need to iterate manually, e.g. by selecing only count > min_count!
    # TODO make Place variable and part of WHERE

    orm = request['orm']
    verified_rows = (orm.query(Number.number, Number.timestamp, User.username)
                     .join(User)
                     .filter(Number.timestamp >= cutoff)
                     .order_by(Number.timestamp.desc(), Number.number)
                     .all())
    unverified_rows = (orm.query(Number.number, Number.timestamp,
                                 func.count(Number.number).label('count'))
                       .filter(Number.timestamp >= cutoff)
                       .filter_by(user=None)
                       .group_by(Number.number)
                       .order_by(Number.timestamp.desc(), Number.number)
                       .all())

    # Keep the first DISPLAY_SIZE verified numbers, as a set for fast lookup.
    verified_output = set([row.number for row in verified_rows][:DISPLAY_SIZE])
    # Unverified numbers must have been entered at least MIN_COUNT times.
    unverified_output = [{'num': row.number, 'count': int(row.count)}
                         for row in unverified_rows
                         if int(row.count) >= MIN_COUNT][:DISPLAY_SIZE]
    # An unverified entry wins over its verified duplicate.
    for entry in unverified_output:
        if entry['num'] in verified_output:
            verified_output.remove(entry['num'])

    remaining_verified = [{'num': number, 'count': 1} for number in verified_output]
    display_output = sorted(remaining_verified + unverified_output,
                            key=lambda entry: entry['num'])

    since = format_datetime(cutoff, 'short', locale=get_valid_locale(request.locale))
    return {'numbers': display_output,
            'since': since,
            'min_count': MIN_COUNT
            }

Example 22

Project: autonomie Source File: user.py
def add_custom_datas_headers(writer, query):
    """
    Add custom headers that are not added through automation
    """
    # Compte analytique: find the largest number of companies attached to a
    # single account so we can emit one header column per possible company.
    count_query = DBSESSION().query(
        func.count(COMPANY_EMPLOYEE.c.company_id).label('nb')
    ).group_by(COMPANY_EMPLOYEE.c.account_id)
    top_row = count_query.order_by(desc("nb")).first()

    if top_row:
        for index in range(top_row[0]):
            writer.add_extra_header({
                'label': "Compte_analytique {0}".format(index + 1),
                'name': "code_compta_{0}".format(index + 1),
            })

    return writer

Example 23

Project: bodhi Source File: generic.py
def get_top_testers(request):
    """Return the five users with the most comments in the recent timeframe."""
    db = request.db
    settings = request.registry.settings
    blacklist = settings.get('stats_blacklist').split()
    days = int(settings.get('top_testers_timeframe', 7))
    start_time = datetime.datetime.utcnow() - datetime.timedelta(days=days)

    query = db.query(
        models.User,
        sa.func.count(models.User.comments).label('count_1')
    ).join(models.Comment)
    query = query.order_by('count_1 desc')
    query = query.filter(models.Comment.timestamp > start_time)

    # Skip blacklisted accounts (e.g. bots).
    for name in blacklist:
        query = query.filter(models.User.name != name)

    return query.group_by(models.User).limit(5).all()

Example 24

Project: fas Source File: group.py
    @identity.require(turbogears.identity.not_anonymous())
    @expose(template="genshi-text:fas.templates.group.dump", format="text",
            content_type='text/plain; charset=utf-8')
    @expose(allow_json=True)
    def dump(self, groupname=None, role_type=None):
        """Dump privacy-filtered member data, optionally for one group.

        :param groupname: if given, list only approved members of that
            group with their real role type; otherwise list everyone with
            a literal 'user' role.
        :param role_type: accepted for API compatibility; unused here.
        :returns: dict with a ``people`` list of
            ``[username, email, human_name, role_type, sponsored]`` rows,
            where the email is blanked for private accounts the viewer is
            not entitled to see.
        """
        if not groupname:
            stmt = select([People.privacy, People.username, People.email,
                People.human_name, "'user'", 's.sponsored'],
                from_obj=PeopleTable.outerjoin(select([PersonRoles.sponsor_id,
                        func.count(PersonRoles.sponsor_id).label('sponsored')]
                        ).group_by(PersonRoles.sponsor_id
                            ).correlate().alias('s')
                )).order_by(People.username)
        else:
            stmt = select([People.privacy, People.username, People.email,
                People.human_name, PersonRoles.role_type, 's.sponsored'],
                from_obj=GroupsTable.join(PersonRolesTable).join(PeopleTable,
                    onclause=PeopleTable.c.id==PersonRolesTable.c.person_id
                    ).outerjoin(select([PersonRoles.sponsor_id,
                        func.count(PersonRoles.sponsor_id).label('sponsored')]
                        ).where(and_(
                            PersonRoles.group_id==Groups.id,
                            Groups.name==groupname)).group_by(
                                PersonRoles.sponsor_id).correlate().alias('s')
                            )).where(and_(Groups.name==groupname,
                                PersonRoles.role_status=='approved')
                                ).order_by(People.username)

        people = []
        # Determine the viewer's privilege level.  BUGFIX: `username` must be
        # initialized here -- previously it was only assigned in the 'public'
        # branch, so the privacy check below raised NameError for an
        # anonymous viewer hitting a private row.
        username = None
        if identity.in_any_group(config.get('admingroup', 'accounts'),
                config.get('systemgroup', 'fas-system')):
            user = 'admin'
        elif identity.current.anonymous:
            user = 'anonymous'
        else:
            user = 'public'
            username = identity.current.user_name

        for row in stmt.execute():
            person = list(row[1:])
            # Normalize a NULL sponsor count to 0.
            if not row['sponsored']:
                person[-1] = 0
            if row['privacy'] and user != 'admin' \
                    and username != row['username']:
                # filter private data
                person[2] = u''
            people.append(person)
        return dict(people=people)

Example 25

Project: snorkel Source File: context.py
    def child_context_stats(self, parent_context):
        """
        Given a parent context class, gets all the child context classes, and renders histograms of the number
        of children per parent, recursing down the context hierarchy.

        NOTE(review): relies on the SQLAlchemy-internal attribute
        ``_calculated_foreign_keys`` -- presumably each child relationship has
        exactly one foreign key back to the parent; verify before upgrading
        SQLAlchemy.
        """
        session = object_session(self)
        parent_name = parent_context.__table__.name

        # Get all the child context relationships (those whose back-reference
        # points at the parent table).
        rels = [r for r in inspect(parent_context).relationships if r.back_populates == parent_name]
        
        # Print the histograms for each child context, and recurse!
        for rel in rels:
            c  = rel.mapper.class_
            fk = list(rel._calculated_foreign_keys)[0]
                
            # Query for the number of child contexts per parent context
            label = 'Number of %ss per %s' % (c.__table__.name, parent_name)
            query = session.query(fk, func.count(c.id).label(label)).group_by(fk) 
                
            # Render as a pandas dataframe histogram
            df = pd.read_sql(query.statement, query.session.bind)
            df.hist(label)

            # Recurse to grandchildren
            self.child_context_stats(c)

Example 26

Project: holmes-api Source File: request.py
    @classmethod
    def get_status_code_info(self, domain_name, db):
        result = []

        query = db \
            .query(
                Request.status_code,
                sa.func.count(Request.status_code).label('total')
            ) \
            .filter(Request.domain_name == domain_name) \
            .group_by(Request.status_code) \
            .all()

        for i in query:
            result.append({
                'code': i.status_code,
                'title': get_status_code_title(i.status_code),
                'total': i.total
            })

        return result

Example 27

Project: holmes-api Source File: request.py
    @classmethod
    def get_requests_count_by_status(self, db, limit=1000):
        """Return per-domain request counts grouped by status code.

        The special key '_all' aggregates the counts over every domain.
        """
        aggregate = defaultdict(int)
        per_domains = {'_all': aggregate}

        from holmes.models.domain import Domain
        for domain in db.query(Domain).all():
            rows = db \
                .query(
                    Request.status_code,
                    sa.func.count(Request.id).label('count')
                ) \
                .filter(Request.domain_name == domain.name) \
                .group_by(Request.status_code) \
                .order_by('count DESC') \
                .limit(limit) \
                .all()
            per_domains[domain.name] = rows

            # Fold this domain's counts into the overall '_all' bucket.
            for status_code, count in rows:
                aggregate[status_code] += count

        per_domains['_all'] = aggregate.items()

        return per_domains

Example 28

Project: holmes-api Source File: violation.py
    @classmethod
    def get_most_common_violations_names(cls, db, sample_limit=50000):
        """Return (key_name, count) pairs for the most frequent violations.

        The aggregation is restricted to the newest ``sample_limit`` active
        violations to keep the query cheap.
        """
        sample = (db.query(Violation.id,
                           Violation.key_id,
                           Key.name.label('key_name'))
                  .filter(Violation.key_id == Key.id)
                  .filter(Violation.review_is_active == True)
                  .order_by(Violation.id.desc())
                  .limit(sample_limit)
                  .subquery())

        counted = (db.query(sample.columns.key_name,
                            sa.func.count(sample.columns.key_id).label('count'))
                   .group_by(sample.columns.key_id)
                   .order_by('count desc'))
        return counted.all()

Example 29

Project: holmes-api Source File: violation.py
    @classmethod
    def get_group_by_key_id_for_all_domains(cls, db):
        """Return (key_id, domain_name, count) rows for active violations,
        ordered by count descending."""
        from holmes.models.violation import Violation  # to avoid circular dependency
        from holmes.models.domain import Domain  # to avoid circular dependency

        # Per (domain, key) count of active violations.
        counts = (db.query(Violation.key_id.label('violations_key_id'),
                           Violation.domain_id,
                           sa.func.count(Violation.id).label('violation_count'))
                  .filter(Violation.review_is_active == True)
                  .group_by(Violation.domain_id, Violation.key_id)
                  .subquery())

        return (db.query(counts.columns.violations_key_id,
                         Domain.name.label('domain_name'),
                         counts.columns.violation_count)
                .filter(Domain.id == counts.columns.domain_id)
                .order_by('violation_count DESC')
                .all())

Example 30

Project: holmes-api Source File: violation.py
    @classmethod
    def get_group_by_category_id_for_all_domains(cls, db):
        """Return, per domain id, the list of violation counts grouped by
        key category."""
        from holmes.models.keys import Key  # to avoid circular dependency
        from holmes.models.violation import Violation  # to avoid circular dependency

        rows = (db.query(Violation.domain_id,
                         Key.name,
                         Key.category_id,
                         sa.func.count(Key.category_id).label('violation_count'))
                .filter(Key.id == Violation.key_id)
                .filter(Violation.review_is_active == True)
                .group_by(Violation.domain_id)
                .group_by(Key.category_id)
                .order_by('violation_count DESC')
                .all())

        grouped = defaultdict(list)
        for row in rows:
            grouped[row.domain_id].append({
                'key_name': row.name,
                'category_id': row.category_id,
                'violation_count': row.violation_count
            })

        return grouped

Example 31

Project: holmes-api Source File: violation.py
    @classmethod
    def get_top_in_category_for_all_domains(cls, db, limit=1000):
        """Return the top (domain, category, key) violation counts, ordered
        by count descending and capped at ``limit`` rows."""
        from holmes.models.keys import Key  # to avoid circular dependency
        from holmes.models.domain import Domain  # to avoid circular dependency
        from holmes.models.violation import Violation  # to avoid circular dependency

        # Per (domain, key) count of active violations.
        counts = (db.query(Violation.key_id.label('violations_key_id'),
                           Violation.domain_id,
                           sa.func.count(Violation.id).label('violation_count'))
                  .filter(Violation.review_is_active == True)
                  .group_by(Violation.domain_id, Violation.key_id)
                  .subquery())

        return (db.query(Domain.name,
                         Key.category_id,
                         Key.name,
                         counts.columns.violation_count)
                .filter(counts.columns.domain_id == Domain.id)
                .filter(counts.columns.violations_key_id == Key.id)
                .group_by(counts.columns.domain_id,
                          counts.columns.violations_key_id,
                          Key.category_id)
                .order_by('violation_count DESC')
                .limit(limit)
                .all())

Example 32

Project: adfullssl Source File: scanner.py
  def check_compliance(self):
    """
    Analyzes the network logs to detect SSL non-compliant creatives, and save the SSL compliance
    into the databse.
    """
    def request_count(protocol):
      """
      Return the numver of requests for each creatives.

      :param protocol: 'https' or 'http'.
      :return: a tuple of creative id and the number of requests.
      """
      return self.db_session.query(
        ScanLog.creative_id.label('creative_id'),
        func.count(ScanLog.url).label('url_count')
      ).filter(
        ScanLog.created_at == datetime.date.today(),
        ScanLog.protocol == protocol
      ).group_by(
        ScanLog.creative_id
      ).subquery(protocol)

    def request_match_ids():
      """
      Return the ids of creatives that made difference numbers of requests over https and http.
      """
      https = request_count('https')
      http = request_count('http')

      return [t[0] for t in self.db_session.query(
        https.c.creative_id
      ).filter(
        https.c.creative_id == http.c.creative_id,
        https.c.url_count == http.c.url_count
      ).all()]

    creative_ids = [t[0] for t in self.db_session.query(
      ScanLog.creative_id
    ).filter(
      ScanLog.created_at == datetime.date.today()
    ).distinct().all()]
    print '# of creatives: %d' % len(creative_ids)

    noncompliant_ids = [t[0] for t in self.db_session.query(
      ScanLog.creative_id
    ).filter(
      ScanLog.created_at == datetime.date.today(),
      ScanLog.issue_id != IssueType.NO_ISSUE,
      ScanLog.issue_id != IssueType.NO_EXTERNAL,
      ScanLog.issue_id is not None,
      ScanLog.protocol == 'https'
    ).distinct().all()]

    compliant_ids = list(set(creative_ids) - set(noncompliant_ids))
    print '# of compliant: %d' % len(compliant_ids)
    print '# of non-compliant: %d' % len(noncompliant_ids)

    match_ids = request_match_ids()
    unmatch_ids = list(set(creative_ids) - set(match_ids))
    print '# of request match: %d' % len(match_ids)
    print '# of request unmatch: %d' % len(unmatch_ids)

    # Update the compliance status.
    if len(compliant_ids) > 0:
      self._update_creatives(compliant_ids, 'compliance=1')
    if len(noncompliant_ids) > 0:
      self._update_creatives(noncompliant_ids, 'compliance=0')

    # Update the request match status.
    if len(match_ids) > 0:
      self._update_creatives(match_ids, 'request_match=1')
    if len(unmatch_ids) > 0:
      self._update_creatives(unmatch_ids, 'request_match=0')

    self.db_session.commit()

Example 33

Project: build-relengapi Source File: grooming.py
@badpenny.periodic_task(seconds=3600)
def replicate(job_status):
    """Replicate objects between regions as necessary"""
    # fetch all files with at least one instance, but not a full complement
    # of instances
    num_regions = len(current_app.config['TOOLTOOL_REGIONS'])
    fi_tbl = tables.FileInstance
    f_tbl = tables.File
    session = current_app.db.session('relengapi')

    # Count the instances of each file in a subquery...
    instance_counts = (session.query(fi_tbl.file_id,
                                     sa.func.count('*').label('instance_count'))
                       .group_by(fi_tbl.file_id)
                       .subquery())

    # ...then pick every file whose count falls short of the region total.
    under_replicated = (session.query(f_tbl)
                        .join(instance_counts,
                              f_tbl.id == instance_counts.c.file_id)
                        .filter(instance_counts.c.instance_count < num_regions)
                        .all())
    for file in under_replicated:
        replicate_file(session, file)
    session.commit()

Example 34

Project: security_monkey Source File: auditor_settings.py
    def get(self):
        """
            .. http:get:: /api/1/auditorsettings

            Get a list of AuditorSetting items

            **Example Request**:

            .. sourcecode:: http

                GET /api/1/auditorsettings HTTP/1.1
                Host: example.com
                Accept: application/json, text/javascript

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Content-Type: application/json

                {
                    count: 15,
                    items: [
                        {
                            id: 1,
                            account: "aws-account-name",
                            technology: "iamuser",
                            disabled: true,
                            issue: "User with password login.",
                            count: 15
                        },
                        ...
                    ]
                    auth: {
                        authenticated: true,
                        user: "[email protected]"
                    }
                }

            :statuscode 200: no error
            :statuscode 401: Authentication failure. Please login.
        """

        # All query-string parameters are optional; 'count'/'page' control
        # pagination, the rest are filters or sort directives.
        self.reqparse.add_argument('count', type=int, default=30, location='args')
        self.reqparse.add_argument('page', type=int, default=1, location='args')
        self.reqparse.add_argument('accounts', type=str, default=None, location='args')
        self.reqparse.add_argument('technologies', type=str, default=None, location='args')
        self.reqparse.add_argument('enabled', type=bool, default=None, location='args')
        self.reqparse.add_argument('issue', type=str, default=None, location='args')
        self.reqparse.add_argument('order_by', type=str, default=None, location='args')
        self.reqparse.add_argument('order_dir', type=str, default='Desc', location='args')
        args = self.reqparse.parse_args()

        page = args.pop('page', None)
        count = args.pop('count', None)
        # Drop unset/falsy filters so only supplied ones are applied below.
        # NOTE: deleting keys while iterating is only safe on Python 2, where
        # dict.items() returns a list snapshot; this file is Python 2 code.
        for k, v in args.items():
            if not v:
                del args[k]

        # Base query joins Account and Technology so both filtering and
        # ordering can reference their columns.
        query = AuditorSettings.query
        query = query.join((Account, Account.id == AuditorSettings.account_id))
        query = query.join((Technology, Technology.id == AuditorSettings.tech_id))

        # 'accounts' / 'technologies' accept comma-separated lists of names.
        if 'accounts' in args:
            accounts = args['accounts'].split(',')
            query = query.filter(Account.name.in_(accounts))

        if 'technologies' in args:
            technologies = args['technologies'].split(',')
            query = query.filter(Technology.name.in_(technologies))

        # 'enabled' is the inverse of the stored 'disabled' flag.
        if 'enabled' in args:
            query = query.filter(AuditorSettings.disabled != bool(args['enabled']))

        if 'issue' in args:
            query = query.filter(AuditorSettings.issue_text == args['issue'])

        if 'order_by' in args:

            # Each sortable column gets an explicit Desc/Asc branch; note the
            # 'enabled' branches invert asc/desc because the column stores
            # 'disabled'.
            if args['order_by'] == 'account' and args['order_dir'] == 'Desc':
                query = query.order_by(Account.name.desc())
            elif args['order_by'] == 'account' and args['order_dir'] == 'Asc':
                query = query.order_by(Account.name.asc())

            elif args['order_by'] == 'technology' and args['order_dir'] == 'Desc':
                query = query.order_by(Technology.name.desc())
            elif args['order_by'] == 'technology' and args['order_dir'] == 'Asc':
                query = query.order_by(Technology.name.asc())

            elif args['order_by'] == 'enabled' and args['order_dir'] == 'Desc':
                query = query.order_by(AuditorSettings.disabled.asc())
            elif args['order_by'] == 'enabled' and args['order_dir'] == 'Asc':
                query = query.order_by(AuditorSettings.disabled.desc())

            elif args['order_by'] == 'issue' and args['order_dir'] == 'Desc':
                query = query.order_by(AuditorSettings.issue_text.desc())
            elif args['order_by'] == 'issue' and args['order_dir'] == 'Asc':
                query = query.order_by(AuditorSettings.issue_text.asc())

            elif args['order_by'] == 'issue_count':
                # Sorting by issue count requires an aggregate: count
                # ItemAudit rows per auditor setting in a subquery, then
                # join and order by that count.
                stmt = db.session.query(
                    ItemAudit.auditor_setting_id,
                    func.count('*').label('setting_count')
                ).group_by(
                    ItemAudit.auditor_setting_id
                ).subquery()

                query = query.join(
                    (stmt, AuditorSettings.id == stmt.c.auditor_setting_id)
                )

                if args['order_dir'] == 'Desc':
                    query = query.order_by(
                        stmt.c.setting_count.desc()
                    )
                elif args['order_dir'] == 'Asc':
                    query = query.order_by(
                        stmt.c.setting_count.asc()
                    )

        # Flask-SQLAlchemy pagination object (has .items/.page/.total).
        enabled_auditors = query.paginate(page, count)

        auditor_settings = []
        for auditor_setting in enabled_auditors.items:
            marshalled = marshal(auditor_setting.__dict__, AUDITORSETTING_FIELDS)
            # Merge the marshalled fields with derived values.
            # NOTE: dict.items() + dict.items() list concatenation is
            # Python-2-only syntax.
            marshalled = dict(
                marshalled.items() +
                {
                    'account': auditor_setting.account.name,
                    'technology': auditor_setting.technology.name,
                    'count': len(auditor_setting.issues)
                }.items()
            )
            # Expose 'issue_text' under the public name 'issue'.
            marshalled['issue'] = marshalled['issue_text']
            del marshalled['issue_text']
            auditor_settings.append(marshalled)

        ret_dict = {
            'items': auditor_settings,
            'page': enabled_auditors.page,
            'total': enabled_auditors.total,
            'count': len(auditor_settings),
            'auth': self.auth_dict
        }

        return ret_dict, 200

Example 35

Project: monasca-api Source File: alarms_repository.py
    @sql_repository.sql_try_catch_block
    def get_alarms_count(self, tenant_id, query_parms=None, offset=None, limit=None):
        """
        Count alarms for a tenant, optionally filtered and grouped.

        Builds a SQLAlchemy Core SELECT of ``count(*)`` (plus any group-by
        columns) over the alarm/alarm-definition tables, with all user
        values passed via bindparams. Returns a list of row dicts.
        """
        if not query_parms:
            query_parms = {}

        with self._db_engine.connect() as conn:
            # 'parms' collects bindparam values executed with the query.
            parms = {}
            # Short aliases for the table objects held on self.
            ad = self.ad
            am = self.am
            mdd = self.mdd
            mde = self.mde
            md = self.md
            a = self.a

            query_from = a.join(ad, ad.c.id == a.c.alarm_definition_id)

            parms['b_tenant_id'] = tenant_id

            group_by_columns = []

            if 'group_by' in query_parms:
                group_by_columns = query_parms['group_by']
                sub_group_by_columns = []
                # Metric-level group-bys need a join through the metric
                # definition/dimension tables, built as a distinct subquery.
                metric_group_by = {'metric_name',
                                   'dimension_name',
                                   'dimension_value'}.intersection(set(query_parms['group_by']))
                if metric_group_by:
                    sub_query_columns = [am.c.alarm_id]
                    if 'metric_name' in metric_group_by:
                        sub_group_by_columns.append(mde.c.name.label('metric_name'))
                    if 'dimension_name' in metric_group_by:
                        sub_group_by_columns.append(md.c.name.label('dimension_name'))
                    if 'dimension_value' in metric_group_by:
                        sub_group_by_columns.append(md.c.value.label('dimension_value'))

                    sub_query_columns.extend(sub_group_by_columns)

                    sub_query_from = (mde.join(mdd, mde.c.id == mdd.c.metric_definition_id)
                                      .join(md, mdd.c.metric_dimension_set_id == md.c.dimension_set_id)
                                      .join(am, am.c.metric_definition_dimensions_id == mdd.c.id))

                    sub_query = (select(sub_query_columns)
                                 .select_from(sub_query_from)
                                 .distinct()
                                 .alias('metrics'))

                    query_from = query_from.join(sub_query, sub_query.c.alarm_id == a.c.id)

            # Main select: count(*) plus whatever the caller grouped by.
            query_columns = [func.count().label('count')]
            query_columns.extend(group_by_columns)

            query = (select(query_columns)
                     .select_from(query_from)
                     .where(ad.c.tenant_id == bindparam('b_tenant_id')))

            parms['b_tenant_id'] = tenant_id

            # Each optional filter adds a WHERE clause bound by name; user
            # input only ever flows through bindparams, never string SQL.
            if 'alarm_definition_id' in query_parms:
                parms['b_alarm_definition_id'] = query_parms['alarm_definition_id']
                query = query.where(ad.c.id == bindparam('b_alarm_definition_id'))

            if 'state' in query_parms:
                parms['b_state'] = query_parms['state'].encode('utf8')
                query = query.where(a.c.state == bindparam('b_state'))

            if 'severity' in query_parms:
                # '|'-separated severities become an OR of bindparams.
                # NOTE: xrange — this is Python 2 code.
                severities = query_parms['severity'].split('|')
                query = query.where(
                    or_(ad.c.severity == bindparam('b_severity' + str(i)) for i in xrange(len(severities))))
                for i, s in enumerate(severities):
                    parms['b_severity' + str(i)] = s.encode('utf8')

            if 'lifecycle_state' in query_parms:
                parms['b_lifecycle_state'] = query_parms['lifecycle_state'].encode('utf8')
                query = query.where(a.c.lifecycle_state == bindparam('b_lifecycle_state'))

            if 'link' in query_parms:
                parms['b_link'] = query_parms['link'].encode('utf8')
                query = query.where(a.c.link == bindparam('b_link'))

            if 'state_updated_start_time' in query_parms:
                parms['b_state_updated_at'] = query_parms['state_updated_start_time'].encode("utf8")
                query = query.where(a.c.state_updated_at >= bindparam('b_state_updated_at'))

            if 'metric_name' in query_parms:
                # Restrict to alarms attached to a metric with this name via
                # an IN (subselect of alarm ids).
                subquery_md = (select([md])
                               .where(md.c.name == bindparam('b_metric_name'))
                               .distinct()
                               .alias('md_'))
                subquery = (select([a.c.id])
                            .select_from(am.join(a, a.c.id == am.c.alarm_id)
                                         .join(mdd, mdd.c.id == am.c.metric_definition_dimensions_id)
                                         .join(subquery_md, subquery_md.c.id == mdd.c.metric_definition_id))
                            .distinct())
                query = query.where(a.c.id.in_(subquery))
                parms['b_metric_name'] = query_parms['metric_name'].encode('utf8')

            if 'metric_dimensions' in query_parms:
                # One aliased dimension subquery per "name:value" entry, all
                # joined into the same FROM so every dimension must match.
                sub_query = select([a.c.id])
                sub_query_from = (a.join(am, am.c.alarm_id == a.c.id)
                                  .join(mdd,
                                        mdd.c.id ==
                                        am.c.metric_definition_dimensions_id))

                sub_query_md_base = select([md.c.dimension_set_id]).select_from(md)

                for i, metric_dimension in enumerate(query_parms['metric_dimensions']):
                    md_name = "b_md_name_{}".format(i)
                    md_value = "b_md_value_{}".format(i)

                    sub_query_md = (sub_query_md_base
                                    .where(md.c.name == bindparam(md_name))
                                    .where(md.c.value == bindparam(md_value))
                                    .distinct()
                                    .alias('md_{}'.format(i)))

                    # Each entry is expected to be "name:value";
                    # a missing ':' would raise IndexError below.
                    parsed_dimension = metric_dimension.split(':')
                    sub_query_from = (sub_query_from
                                      .join(sub_query_md,
                                            sub_query_md.c.dimension_set_id ==
                                            mdd.c.metric_dimension_set_id))

                    parms[md_name] = parsed_dimension[0].encode('utf8')
                    parms[md_value] = parsed_dimension[1].encode('utf8')

                    sub_query = (sub_query
                                 .select_from(sub_query_from)
                                 .distinct())
                    query = query.where(a.c.id.in_(sub_query))

            if group_by_columns:
                query = (query
                         .order_by(*group_by_columns)
                         .group_by(*group_by_columns))

            if limit:
                # limit + 1: fetches one extra row — presumably so the
                # caller can detect whether a further page exists; confirm
                # against the calling code.
                query = query.limit(bindparam('b_limit'))
                parms['b_limit'] = limit + 1

            if offset:
                query = query.offset(bindparam('b_offset'))
                parms['b_offset'] = offset

            query = query.distinct()

            return [dict(row) for row in conn.execute(query, parms).fetchall()]