Il existe plusieurs alternatives à carbon, la plus connue étant sûrement influxdb. Celle-ci est basée sur cassandra !
+https://github.com/kairosdb/kairos-carbon
via arnaudb
Encore une discussion intéressante sur les perfs de graphite. Ce qu'il faut retenir, c'est que le bottleneck peut se situer au niveau du CPU ou au niveau du disque (la RAM, en général, ce n'est pas un problème, même si, bien sûr, il faut la surveiller)
Pour connaître l'utilisation du CPU de carbon-cache, une métrique est envoyée par le daemon dans carbon.agents.graphite-x.cpuUsage
Pour connaître l'utilisation du disque, on se sert de iostat -dmx 1 2 (merci arnaud)
Si le disque est trop haut (entre 50 et 75), il faut le soulager en baissant dans la conf de carbon le max update par seconde.
Ce qui aura pour effet d'augmenter la taille du cache et donc de faire plus travailler le CPU..
Au contraire si le CPU est chargé mais que le disque ne fait rien, il faut augmenter le max update par seconde.
En trouvant le bon équilibre on peut exploiter au maximum le hardware disponible
Un screenshot d'un dashboard qu'il est bien
front end graphite avec un backend cassandra (carrément)
via arnaudb
#
#
# Pull in the LSB helper functions (log_daemon_msg, log_end_msg, ...).
. /lib/lsb/init-functions

PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
GRAPHITE_HOME=/opt/graphite
NAME=carbon-cache
DESC=carbon-cache

# Carbon has its own logging facility, by default in
# /opt/graphite/storage/log/carbon-cache-*
DAEMON=$GRAPHITE_HOME/bin/carbon-cache.py
PIDFILE=/opt/graphite/storage/carbon-cache-a.pid
SCRIPTNAME=/etc/init.d/$NAME

# Abort early when the carbon-cache launcher is missing or not executable.
if [ ! -x "$DAEMON" ]; then
    echo "Couldn't find $DAEMON or not executable"
    exit 99
fi

# Honour the distribution-wide rcS defaults (VERBOSE, ...) when present.
[ -f /etc/default/rcS ] && . /etc/default/rcS
#
#
do_start()
{
    # Return codes:
    #   0  daemon has been started
    #   1  daemon was already running
    #   2  daemon could not be started
    # Dry run first (--test) so an already-running instance maps to 1.
    if ! start-stop-daemon --start --pidfile $PIDFILE \
            --exec $DAEMON --test -- start > /dev/null
    then
        return 1
    fi
    # Now start the daemon for real; a failure here maps to 2.
    if ! start-stop-daemon --start --pidfile $PIDFILE \
            --exec $DAEMON -- start > /dev/null
    then
        return 2
    fi
    return 0
}
#
#
do_stop() {
    # Return codes:
    #   0  daemon has been stopped
    #   1  daemon was already stopped
    #   2  daemon could not be stopped
    log_daemon_msg "Stopping $DESC" "$NAME"
    # Signal 2 (SIGINT) lets carbon-cache flush its cache and exit; retry
    # for up to 5 seconds before giving up.
    start-stop-daemon --stop --signal 2 --retry 5 --quiet --pidfile $PIDFILE
    rc="$?"
    if [ "$rc" = 2 ]; then
        return 2
    fi
    # Remove the leftover PID file, if any.
    [ -e "$PIDFILE" ] && rm $PIDFILE
    return "$rc"
}
# Dispatch on the init action. Reconstructed from a mangled paste:
# the `*)` case patterns had lost their `*`, the status branch was
# collapsed onto one line, and `pid=$(cat $PIDFILE)` had lost its
# command substitution.
case "$1" in
  start)
    [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
      0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
      2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  stop)
    [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
      2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
    esac
    ;;
  restart)
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
      0|1)
        do_start
        case "$?" in
          0) log_end_msg 0 ;;
          1) log_end_msg 1 ;; # Old process is still running
          *) log_end_msg 1 ;; # Failed to start
        esac
        ;;
      *)
        # Failed to stop
        log_end_msg 1
        ;;
    esac
    ;;
  status)
    if [ -s "$PIDFILE" ]; then
      pid=$(cat "$PIDFILE")
      # kill -0 probes whether the process still exists.
      kill -0 "$pid" >/dev/null 2>&1
      if [ "$?" = "0" ]; then
        echo "$NAME is running: pid $pid."
        RETVAL=0
      else
        echo "Couldn't find pid $pid for $NAME."
        RETVAL=1
      fi
    else
      echo "$NAME is stopped (no pid file)."
      RETVAL=1
    fi
    # NOTE(review): RETVAL is set but never used as the exit status;
    # preserved from the original script.
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start | stop | restart | status}" >&2
    exit 3
    ;;
esac
à regarder à la rentrée
via arnaudb
apply function > Special > draw non zero as infinite
Aide à comprendre pas mal de choses concernant graphite/statsd
Suite d'outils bien pratiques pour tester/vérifier/déboguer ses fichiers whisper, en particulier whisper-dump et whisper-fetch
def archive_to_bytes(archive):
    """Return the on-disk size in bytes of a whisper file for *archive*.

    archive: comma-separated "resolution:retention" pairs in the
    storage-schemas.conf notation, e.g. "1m:24h,5m:3m".
    """
    # Struct sizes from the whisper file format.
    SIZE_METADATA = 2 * 4 + 4 + 4  # 16 [!2LfL]
    SIZE_ARCHIVE_INFO = 3 * 4      # 12 [!3L]
    SIZE_POINT = 4 + 8             # 12 [!Ld]

    def to_seconds(spec):
        # "10m" -> 600: last char is the unit, the rest is the count.
        # (The original's "1 60" etc. were "1 * 60" with the asterisks
        # eaten by markdown italics.)
        SECONDS_IN_A = {
            's': 1,
            'm': 1 * 60,
            'h': 1 * 60 * 60,
            'd': 1 * 60 * 60 * 24,
            'y': 1 * 60 * 60 * 24 * 365,
        }
        return int(spec[:-1]) * SECONDS_IN_A[spec[-1]]

    # BUG FIX: the original parsed the module-level ``args.archive`` here
    # instead of the ``archive`` parameter it was given.
    pairs = [[to_seconds(part) for part in point.split(':')]
             for point in archive.split(',')]

    size = 0
    for resolution, retention in pairs:
        # ``//`` preserves the original Python 2 integer-division result of
        # ``SIZE_POINT * retention / resolution``.
        size += SIZE_ARCHIVE_INFO + SIZE_POINT * retention // resolution
    if size:
        size += SIZE_METADATA
    return size
# BUG FIX: the dunders were eaten by markdown emphasis — ``name``/``'main'``
# must be ``__name__``/``'__main__'`` for the guard to ever trigger.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description="Calculates the size of the whisper storage for the given \
archive (in resolution:retention format, e.g. 1m:24h,5m:3m)"
    )
    parser.add_argument(
        'archive',
        help="Archive in storage-schemas.conf format (resolution:retention)"
    )
    args = parser.parse_args()
    # print() with a single argument is valid on both Python 2 and 3.
    print("{} >> {} bytes".format(args.archive, archive_to_bytes(args.archive)))
# --- whisper-dump: print the raw header and data points of a .wsp file ---
import os
import mmap
import struct
import signal
import optparse

try:
    import whisper
except ImportError:
    raise SystemExit('[ERROR] Please make sure whisper is installed properly')

# Die quietly on broken pipes (e.g. when output is piped into `head`).
signal.signal(signal.SIGPIPE, signal.SIG_DFL)

option_parser = optparse.OptionParser(usage='''%prog path''')
(options, args) = option_parser.parse_args()

# Exactly one positional argument: the path of the whisper file to dump.
if len(args) != 1:
    option_parser.error("require one input file name")
else:
    path = args[0]
def mmap_file(filename):
    """Memory-map *filename* read-only and return the mmap object.

    The mapping remains valid after the descriptor is closed (POSIX mmap
    semantics), so the fd is always released before returning.
    """
    fd = os.open(filename, os.O_RDONLY)
    try:
        # Map the whole file; PROT_READ matches the read-only descriptor.
        return mmap.mmap(fd, os.fstat(fd).st_size, prot=mmap.PROT_READ)
    finally:
        # BUG FIX: the original leaked ``fd`` when mmap.mmap() raised.
        os.close(fd)
def read_header(map):
    """Decode the whisper header from the mmap'ed file *map*.

    Returns a dict with 'aggregationMethod', 'maxRetention',
    'xFilesFactor' and a list of per-archive info dicts under 'archives'.
    Raises whisper.CorruptWhisperFile when the header cannot be unpacked.
    """
    try:
        (aggregationType, maxRetention, xFilesFactor, archiveCount) = struct.unpack(
            whisper.metadataFormat, map[:whisper.metadataSize])
    except struct.error:
        # BUG FIX: the original raised the undefined bare name
        # ``CorruptWhisperFile`` (a NameError); use whisper's exception,
        # which takes (error, path). ``path`` is the module-level input path.
        raise whisper.CorruptWhisperFile("Unable to unpack header", path)

    archives = []
    archiveOffset = whisper.metadataSize
    # range() works on both Python 2 and 3 (the original used xrange).
    for i in range(archiveCount):
        try:
            (offset, secondsPerPoint, points) = struct.unpack(
                whisper.archiveInfoFormat,
                map[archiveOffset:archiveOffset + whisper.archiveInfoSize])
        except struct.error:
            raise whisper.CorruptWhisperFile(
                "Unable to read archive %d metadata" % i, path)
        archives.append({
            'offset': offset,
            'secondsPerPoint': secondsPerPoint,
            'points': points,
            'retention': secondsPerPoint * points,
            'size': points * whisper.pointSize,
        })
        archiveOffset += whisper.archiveInfoSize

    return {
        # Fall back to 'average' for unknown aggregation type codes.
        'aggregationMethod': whisper.aggregationTypeToMethod.get(aggregationType, 'average'),
        'maxRetention': maxRetention,
        'xFilesFactor': xFilesFactor,
        'archives': archives,
    }
def dump_header(header):
    # Pretty-print the file-level metadata, then delegate to
    # dump_archive_headers for the per-archive sections.
    # (Python 2 print statements — this script targets Python 2.)
    print 'Meta data:'
    print ' aggregation method: %s' % header['aggregationMethod']
    print ' max retention: %d' % header['maxRetention']
    print ' xFilesFactor: %g' % header['xFilesFactor']
    print
    dump_archive_headers(header['archives'])
def dump_archive_headers(archives):
    # Print one block per archive: byte offset within the file, point
    # resolution, point count, covered time span and byte size.
    for i,archive in enumerate(archives):
        print 'Archive %d info:' % i
        print ' offset: %d' % archive['offset']
        print ' seconds per point: %d' % archive['secondsPerPoint']
        print ' points: %d' % archive['points']
        print ' retention: %d' % archive['retention']
        print ' size: %d' % archive['size']
        print
def dump_archives(archives):
    # Dump every stored (timestamp, value) point of every archive.
    # NOTE(review): reads the module-level ``map`` created at the bottom of
    # the script, not a parameter — must be called after mmap_file(path).
    for i,archive in enumerate(archives):
        print 'Archive %d data:' %i
        offset = archive['offset']
        for point in xrange(archive['points']):
            # Each point is a packed (timestamp, value) pair [!Ld].
            (timestamp, value) = struct.unpack(whisper.pointFormat, map[offset:offset+whisper.pointSize])
            print '%d: %d, %10.35g' % (point, timestamp, value)
            offset += whisper.pointSize
        print
# Entry point: validate the path, map the file, then dump header and data.
if not os.path.exists(path):
    raise SystemExit('[ERROR] File "%s" does not exist!' % path)

map = mmap_file(path)  # NOTE: shadows the ``map`` builtin; read by dump_archives
header = read_header(map)
dump_header(header)
dump_archives(header['archives'])