app: Use a ThreadPool to fetch metrics.

This commit is contained in:
Daniele Sluijters
2014-03-13 11:48:02 +01:00
parent 9d1f2f7254
commit c4d1dd3596

View File

@@ -9,6 +9,7 @@ try:
 except ImportError:
     from urllib.parse import unquote
 from datetime import datetime, timedelta
+from multiprocessing.dummy import Pool as ThreadPool
 from flask import (
     Flask, render_template, abort, url_for,
@@ -91,19 +92,17 @@ def index():
     # TODO: Would be great if we could parallelize this somehow, doing these
     # requests in sequence is rather pointless.
     prefix = 'com.puppetlabs.puppetdb.query.population'
-    num_nodes = get_or_abort(
-        puppetdb.metric,
-        "{0}{1}".format(prefix, ':type=default,name=num-nodes'))
-    num_resources = get_or_abort(
-        puppetdb.metric,
-        "{0}{1}".format(prefix, ':type=default,name=num-resources'))
-    avg_resources_node = get_or_abort(
-        puppetdb.metric,
-        "{0}{1}".format(prefix, ':type=default,name=avg-resources-per-node'))
+    pool = ThreadPool()
+    endpoints = [
+        "{0}{1}".format(prefix, ':type=default,name=num-nodes'),
+        "{0}{1}".format(prefix, ':type=default,name=num-resources'),
+        "{0}{1}".format(prefix, ':type=default,name=avg-resources-per-node'),
+    ]
+    fetched_metrics = pool.map(puppetdb.metric, endpoints)
     metrics = {
-        'num_nodes': num_nodes['Value'],
-        'num_resources': num_resources['Value'],
-        'avg_resources_node': "{0:10.0f}".format(avg_resources_node['Value']),
+        'num_nodes': fetched_metrics[0]['Value'],
+        'num_resources': fetched_metrics[1]['Value'],
+        'avg_resources_node': "{0:10.0f}".format(fetched_metrics[2]['Value']),
     }
     nodes = puppetdb.nodes(