From 29612b16e8e13f3856a253168a84af18dcdfbab5 Mon Sep 17 00:00:00 2001
From: Peter Sarossy
Date: Sun, 11 Aug 2019 21:21:59 -0400
Subject: [PATCH] First commit

---
 .dockerignore |    7 +
 Dockerfile    |   23 +
 README.md     |    7 +-
 btmon         |   11 +
 btmon.cfg     |   18 +
 btmon.py      | 4908 +++++++++++++++++++++++++++++++++++++++++++++++++
 build         |    4 +
 7 files changed, 4976 insertions(+), 2 deletions(-)
 create mode 100644 .dockerignore
 create mode 100644 Dockerfile
 create mode 100644 btmon
 create mode 100644 btmon.cfg
 create mode 100644 btmon.py
 create mode 100644 build

diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..9059646
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,7 @@
+.git
+.gitignore
+.idea
+build
+LICENSE
+README*
+VERSION
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..991d54c
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,23 @@
+# Pull base image.
+FROM debian:jessie
+
+MAINTAINER Peter Sarossy
+
+# Set environment.
+ENV DEBIAN_FRONTEND noninteractive
+
+# Install packages.
+RUN apt-get update
+RUN apt-get install -y curl mysql-client nano python python-mysqldb sqlite3 wget python-influxdb
+
+# Clean up APT when done.
+RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+# Define working directory.
+WORKDIR /opt/btmon
+
+# Add files to the container.
+ADD . /opt/btmon
+
+# Define the command script.
+CMD ["/bin/sh", "-c", "./btmon"]
diff --git a/README.md b/README.md
index 69cfb99..5100cb4 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,5 @@
-# btmon-influx
-btmon.py with influxdb support
+Btmon Docker Image w/ Influxdb
+==============================
+
+Docker image for running the btmon.py Brultech monitor script.
+
diff --git a/btmon b/btmon
new file mode 100644
index 0000000..6758904
--- /dev/null
+++ b/btmon
@@ -0,0 +1,11 @@
+#!/bin/sh
+set -e
+
+# Exit with error if required environment is not present
+if [ -z "${BTMON_CONFIG}" ] || [ ! -f "${BTMON_CONFIG}" ]; then
+    echo "Environment variable BTMON_CONFIG must be provided"
+    exit 1
+fi
+
+# Execute the btmon python script
+./btmon.py -c "${BTMON_CONFIG}"
diff --git a/btmon.cfg b/btmon.cfg
new file mode 100644
index 0000000..3349e11
--- /dev/null
+++ b/btmon.cfg
@@ -0,0 +1,18 @@
+[source]
+device_type = gem
+ip_read = true
+ip_port = 5000
+ip_mode = server
+include_current = true
+reverse_polarity = true
+full_serials = true
+
+[influxdb]
+influxdb_out = true
+influxdb_host = 10.0.0.72
+influxdb_port = 8086
+influxdb_upload_period = 10
+influxdb_database = btmon
+influxdb_measurement = energy
+influxdb_mode = row
+influxdb_db_schema = ecmreadext
diff --git a/btmon.py b/btmon.py
new file mode 100644
index 0000000..d2e22e3
--- /dev/null
+++ b/btmon.py
@@ -0,0 +1,4908 @@
+#!/usr/bin/python -u
+__version__ = '3.3.1'
+"""Data collector/processor for Brultech monitoring devices.
+
+Collect data from Brultech ECM-1240, ECM-1220, and GEM power monitors. Print
+the data, save the data to a database, or upload the data to a server.
+ +Includes support for uploading to the following services: + * MyEnerSave * SmartEnergyGroups * xively * WattzOn + * PlotWatt * PeoplePower * thingspeak * Eragy + * emoncms * Wattvision * PVOutput * Bidgely + * MQTT * InfluxDB + +Thanks to: + Amit Snyderman + bpwwer & tenholde from the cocoontech.com forums + Kelvin Kakugawa + brian jackson [http://fivejacksons.com/brian/] + Marc MERLIN - http://marc.merlins.org/ + Ben + Eric Sandeen + Mark Lennox + Graham Allan + +Copyright 2012-2015 Matthew Wall, all rights reserved + +This program is free software: you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation, either version 3 of the License, or any later version. + +This program is distributed in the hope that it will be useful, but WITHOUT ANY +WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR +A PARTICULAR PURPOSE. + +See http://www.gnu.org/licenses/ + + +Example Usage: + +btmon.py --serial --serialport=/dev/ttyUSB0 -p + +Example Output: + +2010/06/07 21:48:37: ECM ID: 355555 +2010/06/07 21:48:37: Volts: 120.90V +2010/06/07 21:48:37: Ch1 Amps: 0.25A +2010/06/07 21:48:37: Ch2 Amps: 3.24A +2010/06/07 21:48:37: Ch1 Watts: -124.586KWh ( 1536W) +2010/06/07 21:48:37: Ch1 Positive Watts: 210.859KWh ( 1536W) +2010/06/07 21:48:37: Ch1 Negative Watts: 335.445KWh ( 0W) +2010/06/07 21:48:37: Ch2 Watts: -503.171KWh ( 0W) +2010/06/07 21:48:37: Ch2 Positive Watts: 0.028KWh ( 0W) +2010/06/07 21:48:37: Ch2 Negative Watts: 503.199KWh ( 0W) +2010/06/07 21:48:37: Aux1 Watts: 114.854KWh ( 311W) +2010/06/07 21:48:37: Aux2 Watts: 80.328KWh ( 523W) +2010/06/07 21:48:37: Aux3 Watts: 13.014KWh ( 35W) +2010/06/07 21:48:37: Aux4 Watts: 4.850KWh ( 0W) +2010/06/07 21:48:37: Aux5 Watts: 25.523KWh ( 137W) + + +How to specify options: + +Options can be specified via command line, in a configuration file, or by +modifying constants in this file. Use --help to see the complete list +of command-line options. The configuration file is INI format, with parameter +names corresponding to the command-line options. + +Here are contents of a sample configuration file that listens for data on port +8083 then uploads only channel 1 and channel 2 data from device with serial +number 311111 to plotwatt and enersave and saves to a MySQL database: + +[source] +ip_read = true +ip_port = 8083 +ip_mode = server + +[mysql] +mysql_out = true +mysql_host = localhost +mysql_user = ecmuser +mysql_passwd = ecmpass + +[plotwatt] +plotwatt_out = true +pw_map = 311111_ch1,123,311111_ch2,124 +pw_house_id = 1234 +pw_api_key = 12345 + +[enersave] +enersave_out = true +es_map = 311111_ch1,kitchen,2,311111_ch2,solar panels,1 + + +Data Collection: + +Data can be collected by serial/usb, tcp/ip as client or server, or by polling +a database. + +When collecting via serial/usb, btmon can either poll the device or block until +data appear. Polling should be used when the device is not in real-time mode, +blocking should be used when the device is in real-time mode. + +When collecting via tcp/ip, btmon can act as either a server or client. When +in server mode, btmon blocks until a client connects. When in client mode, +btmon can either poll or block. When polling, btmon opens a connection to the +server, makes a request for data, then closes the connection. When blocking, +btmon opens a connection to the server then blocks until data have been read. 
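+
+For example, here is a [source] section for client mode, polling a device
+that listens on port 5000 (the ip_host parameter naming the device address
+is an assumption here - use --help to verify the exact option names):
+
+[source]
+ip_read = true
+ip_mode = client
+ip_host = 192.168.1.10
+ip_port = 5000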
+
+When collecting from a database, btmon queries the database periodically
+then processes the new data as if they were generated by a brultech device.
+
+
+Data Processing:
+
+Data can be printed to standard output, saved to a database, and/or sent to
+one or more hosted services. Instructions for each of the services are
+enumerated in the following sections.
+
+
+MySQL Database Configuration:
+
+Connections to MySQL require the MySQLdb module. On debian systems install
+the module using apt-get:
+
+apt-get install python-mysqldb
+
+Ensure a user exists and has proper permissions. For example, for the user
+'ecmuser' with password 'ecmpass' connecting from 'ecmclient' to the database
+'ecm', do something like this on the system hosting the database:
+
+# mysql -u root -p
+# mysql> create user ecmuser identified by 'ecmpass';
+# mysql> create database ecm;
+# mysql> grant usage on ecm.* to ecmuser@ecmclient identified by 'ecmpass';
+# mysql> grant all privileges on ecm.* to ecmuser@ecmclient;
+
+The user must have permission to create a database (if it does not already
+exist), permission to create tables (if the table does not already exist),
+and permission to modify tables.
+
+Specify parameters in a configuration file config.txt:
+
+[mysql]
+mysql_out = true
+mysql_host = localhost
+mysql_user = ecmuser
+mysql_passwd = ecmpass
+
+The database and table will be created automatically, assuming that the user
+has appropriate permissions.
+
+To create the database and table explicitly, use the configure option:
+
+btmon.py --mysql-config
+
+Beware that the database will grow unbounded using this configuration. With
+only basic data (volts, amps, watts) from 8 ECM-1240s, the mysql database is
+about 450MB after 10 months of continuous operation.
+
+If you do not want the database to grow, then do not create the 'id' primary
+key, and make the serial the primary key without auto_increment. In that
+case the database is used for data transfer, not data capture.
+
+
+Sqlite Database Configuration:
+
+Specify the filename in a configuration file config.txt:
+
+[sqlite]
+sqlite_out = true
+sqlite_file = /var/btmon/ecm.db
+
+The database and table will be created automatically.
+
+To create the database and table explicitly, use the configure option:
+
+btmon.py --sqlite-config
+
+To create the database and table manually, do something like this:
+
+sqlite3 /var/btmon/ecm.db
+sqlite> create table ecm (id int primary key, time_created bigint, ...
+
+With database schema 'counters' and GEM48PTBinary packets, the database will
+grow by a bit less than 1K for each packet.
+
+
+Round-Robin Database (RRD) Configuration:
+
+Specify the location for RRD files in a configuration file config.txt:
+
+[rrd]
+rrd_out = true
+rrd_dir = /var/btmon/rrd
+
+RRD files are fixed size - there is no need to prune as more data are recorded.
+
+The following parameters are used:
+
+  step - how often data will be recorded
+  heartbeat - how much time between samples before data are considered missing
+  consolidation - how samples should be combined over time
+  resolution - how many samples should be recorded
+
+The step should be equal to the period at which data come from the device,
+either the real-time period if the device is in real-time mode, or the polling
+interval if the device is not.
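+
+For example, a device in real-time mode that sends a packet every 30 seconds
+calls for a step of 30; with the customary heartbeat of twice the step, data
+are considered missing after 60 seconds without a sample.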
+
+The default settings result in one file per channel with the following:
+  4 days of 30 second samples
+  60 days of 5 minute averages
+  365 days of 30 minute averages
+  730 days of 1 hour averages
+For a single GEM with 48 channels, 4 pulse counters, and 8 one-wire sensors,
+this is a total of about 162M for 2 years of data. On an arm-based plug
+computer reading/writing to a very slow usb drive, a single update takes about
+45 seconds for 108 RRD files.
+
+
+InfluxDB Configuration:
+
+Connections to InfluxDB require the InfluxDB-Python client. On Debian/Ubuntu,
+you can install it with
+
+  apt-get install python-influxdb
+
+Otherwise, you can run
+
+  pip install influxdb
+
+This uses the InfluxDB HTTP API, which by default runs on port 8086. You must
+specify the database and measurement to write to.
+
+[influxdb]
+influxdb_out = true
+influxdb_host = localhost # default
+influxdb_port = 8086 # default
+influxdb_upload_period = 60 # default
+influxdb_username = btmon # if using authentication
+influxdb_password = abcde # if using authentication
+influxdb_database = btmon # required
+influxdb_measurement = energy # required
+influxdb_mode = row # "row": 1 series w/ many values; "col": many series w/ 1 value each
+influxdb_map = 1234567_ch1_aws,a,1234567_ch2_aws,b # renames channels
+influxdb_tags = key1,value1,key2,value2 # adds tags
+influxdb_db_schema = counters,ecmread,ecmreadext # selects schema, default counters
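+
+For example, in row mode all channels of a reading become fields of a single
+point in the configured measurement, along the lines of this line-protocol
+sketch (the field names follow the channel labels, renamed by influxdb_map
+when one is given; the values are illustrative):
+
+  energy volts=121.3,399999_ch1_w=1200.0,399999_ch2_w=350.5
+
+In col mode the same reading is split so that each channel is written as its
+own series carrying a single value.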
+
+
+OpenEnergyMonitor Configuration:
+
+1) register for an account
+2) obtain the API key
+
+Register for an account at the emoncms web site.
+
+Obtain the API key with write access.
+
+By default, all channels on all ECMs will be uploaded.
+
+For example, this configuration will upload all data from all ECMs.
+
+[openenergymonitor]
+oem_out=true
+oem_token=xxx
+
+
+WattzOn Configuration:
+
+1) register for an account
+2) obtain an API key
+3) configure devices that correspond to ECM channels
+
+As of December 2011, it appears that WattzOn service is no longer available.
+
+
+PlotWatt Configuration:
+
+1) register for an account
+2) obtain a house ID and an API key
+3) configure meters that correspond to ECM channels
+
+First register for a plotwatt account at www.plotwatt.com. You should receive
+a house ID and an api key. Then configure 'meters' at the plotwatt web site
+that correspond to channels on each ECM.
+
+Using curl to create 2 meters looks something like this:
+
+curl -d "number_of_new_meters=2" http://API_KEY:@plotwatt.com/api/v2/new_meters
+
+Using curl to list existing meters looks something like this:
+
+curl http://API_KEY:@plotwatt.com/api/v2/list_meters
+
+Use the meter identifiers provided by plotwatt when uploading data, with each
+meter associated with a channel/aux of an ECM. For example, to upload data
+from ch1 and ch2 from ecm serial 399999 to meters 1000 and 1001, respectively,
+use a configuration like this:
+
+[plotwatt]
+plotwatt_out=true
+pw_house_id=XXXX
+pw_api_key=XXXXXXXXXXXXXXXX
+pw_map=399999_ch1,1000,399999_ch2,1001
+
+
+EnerSave Configuration (deprecated as of 2014 - use Bidgely instead):
+
+1) create an account
+2) obtain a token
+3) optionally indicate which channels to record and assign labels/types
+
+Create an account at the enersave web site. Specify 'other' as the device
+type, then enter ECM-1240.
+
+To obtain the token, enter this URL in a web browser:
+
+https://data.myenersave.com/fetcher/bind?mfg=Brultech&model=ECM-1240
+
+Define labels for ECM channels as a comma-delimited list of tuples. Each
+tuple contains an id, description, type. If no map is specified, data from
+all channels will be uploaded, generic labels will be assigned, and the type
+for each channel will default to net metering.
+
+EnerSave defines the following types:
+
+  0 - load
+  1 - generation
+  2 - net metering (default)
+  10 - standalone load
+  11 - standalone generation
+  12 - standalone net
+
+For example, via configuration file:
+
+[enersave]
+es_token=XXX
+es_map=399999_ch1,kitchen,,399999_ch2,solar array,1,399999_aux1,,
+
+
+Bidgely Configuration:
+
+1) Choose "Get Started" at http://www.bidgely.com/home
+2) Select your zip code, etc.
+3) Choose "I have an energy monitor"
+4) Enter your name, email and password to create your account
+5) At "1. Select your Energy Monitor" choose Bidgely API from the
+   drop-down list and click "I have this monitor"
+6) Optionally download the API documentation
+7) Choose "I have downloaded the API document"
+8) Copy the Upload URL to somewhere safe
+9) Add the upload URL to the by_url= parameter in your config file
+10) Click "Connect to Bidgely" and start btmon.
+
+Define labels for ECM channels as a comma-delimited list of tuples. Each
+tuple contains an id, description, type. If no map is specified, data from
+all channels will be uploaded, generic labels will be assigned, and the type
+for each channel will default to "load."
+
+Bidgely defines the following types:
+
+  0 - load (total consumption on site)
+  1 - generation (total generation on site)
+  2 - net metering (net consumption + generation for all circuits)
+  10 - standalone load (DFLT) (subset of total load, i.e. single circuit)
+  11 - standalone generation (subset of total generation, i.e. single circuit)
+
+Note that types 0, 1, and 2 should each only be assigned to one circuit
+in the map. Types 10 and 11 may be assigned to several circuits.
+
+Although type 10 (individual circuit) is the default, you should always define
+one map item with type 0, for total load, to make Bidgely happy.
+
+For this reason, a map is required to use the Bidgely service, and it should
+contain at least one channel of type 0 for total load.
+
+For example, via configuration file:
+
+[bidgely]
+by_url=https://api.bidgely.com/v1/users/TOKEN/homes/1/gateways/1/upload
+by_map=399999_ch1,mains,0,399999_ch2,solar array,1,399999_aux1,hot tub,10,
+
+
+PeoplePower Configuration:
+
+1) create an account
+2) obtain an activation key
+3) register a hub to obtain an authorization token for that hub
+4) indicate which ECM channels should be recorded by configuring devices
+   associated with the hub
+
+1) Find an iPhone or droid, run the PeoplePower app, register for an account.
+
+2) When you register for an account, enter TED as the device type. An
+activation key will be sent to the email account used during registration.
+
+3) Use the activation key to obtain a device authorization token. Create an
+XML file with the activation key, a 'hub ID', and a device type. One way to do
+this is to treat the btmon script as the hub and each channel of each ECM
+as a device. Use any number for the hub ID, and a device type 1004 (TED-5000).
+
+For example, create the file req.xml with the following contents (the XML
+element names here are a best guess around the required values; consult the
+PeoplePower hub activation documentation for the exact format):
+
+  <?xml version="1.0" encoding="UTF-8"?>
+  <request>
+    <hubActivation>
+      <activationKey>xxx:yyyyyyyyyyyyyyy</activationKey>
+      <hubId>1</hubId>
+      <deviceType>1004</deviceType>
+    </hubActivation>
+  </request>
+
+Send the file using HTTP POST to the hub activation URL:
+
+curl -X POST -d @req.xml http://esp.peoplepowerco.com/espapi/rest/hubActivation
+
+You should get a response with resultCode 0 and a deviceAuthToken. The
+response also contains pieces of the URL that you should use for subsequent
+configuration. For example (element names are approximate):
+
+  <?xml version="1.0" encoding="UTF-8"?>
+  <response>
+    <resultCode>0</resultCode>
+    <host>esp.peoplepowerco.com</host>
+    <useSSL>true</useSSL>
+    <port>8443</port>
+    <uri>deviceio/ml</uri>
+    <deviceAuthToken>XXXXX</deviceAuthToken>
+  </response>
+
+which results in this URL:
+
+https://esp.peoplepowerco.com:8443/deviceio/ml
+
+4) Add each channel of each ECM as a new ppco device. The btmon script
+will configure the device definitions, but it requires a map of ECM channels
+to device names. This map also indicates the channel(s) from which data
+should be uploaded.
+
+For example, via configuration file:
+
+[peoplepower]
+peoplepower_out=true
+pp_url=https://esp.peoplepowerco.com:8443/deviceio/ml
+pp_token=XXX
+pp_hub_id=YYY
+pp_map=399999_ch1,999c1,399999_aux2,999a2
+
+Additional notes for PeoplePower:
+
+Use the device type 1005, which is a generic TED MTU. Create an arbitrary
+deviceId for each ECM channel that will be tracked. Apparently the deviceId
+must contain hex characters, so use c1 and c2 for channels 1 and 2 and use aN
+for the aux. The seq field is a monotonically increasing nonce. For example,
+a file add.xml might look like this (format approximate; check the API
+document you downloaded for the exact element names):
+
+  <?xml version="1.0" encoding="UTF-8"?>
+  <h2s seq="1" hubId="YYY" ver="2">
+    <add deviceId="999c1" deviceType="1005" />
+    <add deviceId="999a2" deviceType="1005" />
+  </h2s>
+
+Send the file to the URL received in the activation response.
+
+curl -H "Content-Type: text/xml" -H "PPCAuthorization: esp token=TOKEN" -d @add.xml https://esp.peoplepowerco.com:8443/deviceio/ml
+
+To get a list of devices that ppco recognizes:
+  curl https://esp.peoplepowerco.com/espapi/rest/deviceTypes
+
+
+Eragy Configuration:
+
+1) register power sensor at the eragy web site
+2) obtain a token
+3) create an account
+
+The Eragy web site only knows about TED, Blueline, and eGauge devices. Register
+as a TED5000 power sensor. Eragy will provide a URL and token. Use curl to
+enter the registration for each ECM. Create a file req.xml with the request
+(element names per the TED5000 activation interface):
+
+  <ted5000Activation>
+    <Gateway>XXX</Gateway>
+    <Unique>TOKEN</Unique>
+  </ted5000Activation>
+
+where XXX is an arbitrary gateway ID and TOKEN is the token issued by Eragy.
+If you have only one ECM, use the ECM serial number as the gateway ID. If
+you have multiple ECMs, use one of the ECM serial numbers or just pick a
+number.
+
+Send the request using curl:
+
+curl -X POST -d @req.xml http://d.myeragy.com/energyremote.aspx
+
+The server will respond with something like this:
+
+  <ted5000ActivationResponse>
+    <PostServer>d.myeragy.com</PostServer>
+    <UseSSL>false</UseSSL>
+    <PostPort>80</PostPort>
+    <PostURL>/energyremote.aspx</PostURL>
+    <SSLKey></SSLKey>
+    <AuthToken>TOKEN</AuthToken>
+    <PostRate>1</PostRate>
+  </ted5000ActivationResponse>
+
+At the eragy web site, click the 'find my sensor' button. Eragy assumes that
+the energy sensor will immediately start sending data to eragy, so start
+running btmon.
+
+On the eragy web site, continue and create an account. Eragy will email you
+an account password, which you must enter to complete the account. Then
+configure the account with a name, timezone, etc. and password.
+
+The eragy configuration would be:
+
+[eragy]
+eragy_out=true
+eg_token=TOKEN
+eg_gateway_id=XXX
+
+
+Smart Energy Groups Configuration:
+
+1) create an account
+2) create a site
+3) obtain the site token
+
+Create an account at the smart energy groups web site.
+
+Create a site at the smart energy groups web site.
+
+Obtain the site token at the smart energy groups web site.
+
+Automatically create devices using the 'Discoveries' option in the SEG tools,
+or manually create devices on the smart energy groups web site.
+
+To use the discovery tool, start uploading data. Click the discovery tool,
+then wait awhile as SEG detects the uploads. After a few minutes SEG will
+list the devices and channels, then you can enter names for each channel.
+
+The manual process is a bit more involved. Create one device per ECM/GEM.
+For each device create streams - one power stream and one energy stream for
+each channel. Define a node name for each device based on the following.
+
+By default, data from all channels on all ECM/GEM will be uploaded. The node
+name is the obfuscated serial number, for example XXX123 for the serial
+number 355123. The stream name is p_* or e_* for each channel for power or
+energy, respectively. For example, p_ch1, e_ch1, p_aux1, e_aux1
+
+To upload only a portion of the data, or to use names other than the defaults,
+specify a map via command line or configuration file. It is easiest to use the
+default node names then add longer labels at the Smart Energy Groups web site.
+
+For example, here is a configuration that uploads data only from ch1 and aux2
+from ECM 399999, using stream names p_ch1, e_ch1, p_lighting, and e_lighting.
+
+[smartenergygroups]
+smartenergygroups_out=true
+seg_token=XXX
+seg_map=399999_ch1,,399999_aux2,lighting
+
+
+ThingSpeak Configuration:
+
+1) create an account
+2) create one or more thingspeak channels
+3) on each thingspeak channel, create a field for each ECM/GEM channel
+
+Create an account at the ThingSpeak web site.
+
+Create a ThingSpeak channel. Obtain the token (write api key) for each
+channel.
+
+Create a field for each ECM/GEM channel from which you will upload data.
+Each thingspeak channel is limited to 8 fields.
+
+By default, data from all channels on all ECM/GEM will be uploaded. The
+channel ID and token must be specified for each ECM/GEM. By default, the
+ECM/GEM channels will be uploaded to fields 1-8.
+
+For example, this configuration will upload all data from ECM with serial
+399999 to thingspeak channel with token 12345 and from ECM with serial
+399998 to thingspeak channel with token 12348.
+
+[thingspeak]
+thingspeak_out=true
+ts_tokens=399999,12345,399998,12348
+
+This configuration will upload only ch1 from 399999 to field 3 and aux5 from
+399998 to field 8:
+
+[thingspeak]
+thingspeak_out=true
+ts_tokens=399999,12345,399998,12348
+ts_fields=399999_ch1,3,399998_aux5,8
+
+
+Xively/Pachube/COSM Configuration:
+
+1) create an account
+2) obtain API key
+3) create a feed
+
+Create an account at the Pachube web site.
+
+Obtain the API key from the Pachube web site.
+
+Create a feed at the Pachube web site, or create one using curl as described
+at pachube.
+
+By default, data from every channel from every ECM will be uploaded to a
+single pachube feed.
+
+[pachube]
+pachube_out=true
+pbe_token=XXXXXX
+pbe_feed=3000
+
+
+Wattvision Configuration:
+
+1) create an account
+2) obtain API ID, API Key, and Sensor ID
+
+Create an account by linking to a google account.
+
+Create a House at the wattvision web site.
+
+Skip the sensor and pairing.
+
+Get the API ID, API Key, and Sensor ID from the API Information section of
+Settings.
+
+Wattvision records only a single value - it is designed only for whole-house
+monitoring. So you must specify which channel is the total energy/power
+reading.
+
+[wattvision]
+wattvision_out=true
+wv_api_id = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+wv_api_key = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+wv_sensor_id = XXXXXXXX
+wv_channel = 399999_ch1
+
+
+PVOutput Configuration:
+
+1) create an account
+2) obtain API Key and System ID
+
+Create an account at the PVOutput web site.
+
+Create a system at the PVOutput web site.
+
+In the Settings panel, obtain the API Key and System Id.
+
+PVOutput expects the following information for each system:
+- generated power/energy
+- consumed power/energy
+- temperature
+- voltage
+
+You must identify which channel monitors the total generated energy/power, and
+which channel monitors the total consumed energy/power. If the device is a GEM
+you can identify one of the temperature sensors as the temperature to upload.
+
+The generation and consumption channels can be identified by channel, e.g. ch1,
+or by serial_channel pair, e.g. 399999_ch1. The latter is useful when
+collecting data from multiple ECM/GEM devices.
+
+[pvo]
+pvo_out = true
+pvo_api_key = XXXXX
+pvo_system_id = XXXXX
+pvo_generation_channel = 399999_ch1
+pvo_consumption_channel = 399999_ch2
+pvo_temperature_channel = 399999_t1
+
+
+MQTT Configuration:
+
+1) set up an mqtt server, like mosquitto
+2) create a password-protected mqtt account, e.g. "energy"
+3) use pip to install the "paho-mqtt" module
+
+Without a mqtt_map parameter config, the setup will push data to the mqtt
+server with topic names of the form:
+
+the voltage of the system:
+  <base_topic>/volts -> <volts>
+
+for each energy channel:
+  <base_topic>/<serial>_ch<n>_w -> <watts>
+  <base_topic>/<serial>_ch<n>_wh -> <watt-hours>
+  <base_topic>/<serial>_ch<n>_dwh -> <delta-watt-hours>
+
+for each temperature channel:
+  <base_topic>/<serial>_t<n> -> <temperature>
+
+for each pulse channel:
+  <base_topic>/<serial>_p<n> -> <pulse-count>
+
+for example:
+  [{"topic":"/house/energy/volts", "payload":123.7},
+   {"topic":"/house/energy/399999_ch1_w", "payload":539.533},
+   {"topic":"/house/energy/399999_ch1_wh", "payload":6851728.012},
+   {"topic":"/house/energy/399999_ch1_dwh", "payload":2.248},
+   {"topic":"/house/energy/399999_p1", "payload":3141592},
+   {"topic":"/house/energy/399999_t1", "payload":79.500}]
+
+Temperature, Pulse and Voltage values are emitted only if supported by the
+underlying device.
+
+By configuring the mqtt_map parameter, you can set this up to provide output
+like this:
+
+  [{"topic":"/house/energy/volts", "payload":123.7},
+   {"topic":"/house/energy/solar", "payload":539.533},
+   {"topic":"/house/energy/solar_wh", "payload":6851728.012},
+   {"topic":"/house/energy/solar_dwh", "payload":2.248},
+   {"topic":"/house/energy/water", "payload":3141592},
+   {"topic":"/house/energy/garage_temperature", "payload":79.500}]
+
+Here's a sample mqtt configuration/section running against an mqtt broker
+on the same instance (localhost) as the btmon.py process.
+
+[mqtt]
+mqtt_out=true
+mqtt_host=localhost
+mqtt_port=1883
+mqtt_clientid=btmon
+mqtt_base_topic=/house/energy
+mqtt_user=energy
+mqtt_passwd=please_set_me
+mqtt_map=399999_ch1_w,solar,399999_ch1_wh,solar_wh,399999_t1,garage
+mqtt_upload_period=60
+
+
+Upgrading:
+
+Please consider the following when upgrading from ecmread.py:
+
+* the database collectors have changed completely, so it is no longer possible
+  to use a database collector with the ecmread or ecmreadext schemas. only the
+  counters schema contains enough information to use the database as a
+  collector.
+* the -d option has been replaced by --mysql or --sqlite
+* the db_* directives in the configuration file are now specific to the type
+  of database. for example, db_passwd is now mysql_passwd or sqlite_passwd.
+* the default database schema is called 'counters' - it stores the values from
+  the counters, rather than derived values. the basic ecm1240 and extended
+  ecm1240 schemas are still implemented, but are useful only for storage, not
+  for collection.
+* the default name for the database table is derived from the packet format
+  and the schema type.
+* the packet format must be specified, either at the command line or in the
+  configuration file.
+* uploads to people power and eragy are now correct - this means they will
+  differ by a factor of 1000 from earlier uploads. ecmread 2.4.4 and earlier
+  uploaded kilowatt-hours when it should have uploaded watt-hours.
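+  for example, a channel that had consumed 1.5 kilowatt-hours was uploaded
+  by ecmread as 1.5; btmon now uploads 1500, the same energy expressed in
+  watt-hours.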
+
+
+Changelog:
+
+- 3.3.1
+* added different schema formats to GEM and InfluxDB
+
+- 3.3.0
+* added InfluxDB support (thanks to chicago6061 and mroch)
+
+- 3.2.0 07sep16 mwall
+* added MQTT support (thanks to mrguessed)
+
+- 3.1.2 06sep16 mwall
+* added option to disable serial obfuscation (thanks to mroch)
+* added option for utc clock (thanks to mroch)
+* suppress sql warnings (thanks to WaterByWind)
+* added option to include current (thanks to WaterByWind)
+* send volts to open energy monitor (thanks to WaterByWind)
+
+- 3.1.1 06mar15 mwall
+* fixed debug message in SocketServerCollector (thanks to Brian Klass)
+
+- 3.1.0 07feb15 mwall
+* punt old pachube/cosm url, use xively now
+* ensure poll_interval is integer
+* fix enersave uploader (thanks to Eric Sandeen)
+* fix order of sqlite args
+* miscellaneous sqlite and mysql fixes (thanks to Eric Sandeen and Marc Lennox)
+* added node option for compatibility with emoncms 8.4.x
+* updated support for wattvision api v0.2 (thanks to Eric Sandeen)
+* added support for bidgely api v1.0.1 (thanks to Eric Sandeen)
+
+- 3.0.7 01feb14 mwall
+* use localtime for pvoutput uploads (thanks to Eric Sandeen)
+* better handling of failures when connecting via socket (thanks to Ken Overly)
+
+- 3.0.6 22jan13 mwall
+* added retries for regular reads, not just polling reads
+* do empty read check on each read - windows tcp/ip stack differs from linux!
+* added temperature calculation to match gem firmware later than 1.61
+
+- 3.0.5 26dec12 mwall
+* fix bug in delta wh calculation when polarity is reversed
+* spew less into logs when thingspeak uploads fail
+
+- 3.0.4 26dec12 mwall
+* upload both energy and power numbers to oem
+* added reverse-polarity option
+* added copyright/licensing details
+* added pvoutput.org
+
+- 3.0.3 03dec12 mwall
+* revert the rrd changes. we will make the changes soon, but no need to
+  disrupt the brultech installations for now. testing is progressing on the
+  radio thermostat and eds one-wire monitoring systems, and once that is
+  sorted we will make the rrd changes on btmon.
+
+- 3.0.2 03dec12 mwall
+* upload pulse and sensor data to oem, seg, and pachube
+* improve reporting of read errors when in polling mode
+* include timestamp on thingspeak uploads
+* change default rrd values to assume 10s sampling
+* use 'data' as the data source name for all rrd files
+* punt max/min consolidates in rrd until we sort out the graphing issues
+
+- 3.0.1 04nov12 mwall
+* force period and timeout to be int
+* adjustments to default values for service upload periods
+* added support for wattvision
+* added 0.5 multiplier for one-wire bus (only for DS18B20?)
+* added command-line and config file options for poll intervals
+* added command-line and config file options for update/insert periods
+* do bulk uploading for pachube/cosm
+* fix off-by-one bug in thingspeak default field indexing
+* do bulk uploading for seg
+
+- 3.0.0 02oct12 mwall
+* initial release, based on ecmread 2.4.5
+* support for rrdtool
+* refactored database support
+* improved packet buffering and handling
+* polling or blocking modes for serial and socket-client
+* support gem, ecm1240, ecm1220
+* support multiple packet types
+* automatic configuration of database for mysql and sqlite
+* support for multiplexed devices in polling mode
+
+"""
+__author__ = 'mwall'
+__app__ = 'btmon'
+
+# memory use
+#
+# The use of a fixed buffer should prevent this program from growing.
+# On a slow arm-based plug computer running debian6 we see the following
+# memory use:
+#              VSZ    RSS
+#   btmon    22128   8100 - startup, collecting from 4 ECM-1240 via serial
+#   btmon    29036  15068 - full buffer of 600, collecting from 4 ECM-1240
+#   btmon    49996  36184 - full buffer of 600, collecting from 1 GEM
+#   btmon    28200  14368 - full buffer of 120, collecting from 1 GEM
+#   ecmread  14540   6800 - startup, collecting from 4 ECM-1240 via serial
+#   ecmread  23908   8328 - collecting from 4 ECM-1240, after multiple hours
+#   ecmread  24412   9272 - uploading to multiple services
+#
+# processing of data
+#
+# Be sure to get the combination of buffer size, sampling time, and upload
+# period correct. Packet processing happens after each read. So if the device
+# is set to emit packets every 5 seconds, then processing will happen every 5
+# seconds. If processing takes more than 5 seconds, one or more reads may be
+# missed (depending on lower-level buffering). Note that the opportunity to
+# process happens after each read, but actual processing does not always happen
+# after each read. Some processors will process after each read, others
+# wait for data to collect before processing it.
+#
+# Ensure that no processor will take too long. If a processor takes longer
+# than a sampling period, data from the next sample will probably be lost. If
+# a processor waits longer than the sampling period times the buffer size, then
+# some samples will not be uploaded.
+#
+# For example, with an ECM that emits data every 10 seconds and a mysql
+# processor that saves to a database on the localhost every 60 seconds,
+# the buffer size must be at least 6. Setting it to 12 or 18 accommodates
+# periodic timeouts to the database.
+#
+# If network speeds are slow and/or hosted services are slow or unreliable,
+# run a second instance of this program. Run one instance to collect data,
+# and a second instance to upload data.
+#
+# The default values assume a sampling interval of 10 seconds.
+#
+# serial numbers
+#
+# For the green-eye, the serial number is 8 characters long - a 5 character
+# serial plus a 3 character unit id.
+#
+# For the ecm-1240, the serial number is 6 characters long - a 5 character
+# serial plus a 1 character unit id.
+#
+# However, the GEM unit id may be more than one character, so serials that
+# a GEM generates in ECM packets may have to be longer than 6 characters.
+#
+# packets
+#
+# There are three kinds of packets: raw, compiled, and calculated.
+# Raw packets are what we get from the device. They may be binary,
+# they may be ascii, they may be ECM-1240 format, they may be GEM format,
+# or they may be any of many other formats.
+#
+# Compiled packets have been augmented with labels such as 'ch1_aws'. The
+# labels are associated with the raw data but no calculations have been done.
+# Compiling a packet requires only a raw packet.
+#
+# Calculated packets contain labels with derivative values such as 'ch1_w'.
+# Calculating a packet requires two compiled packets, since many of the
+# calculated values are differences.
+#
+# The GEM binary packet has 48 channels. Since only 32 of them are currently
+# used (oct2012), we use only 32 of them in order to minimize memory and
+# storage.
+#
+# calculation of watts and watt-hours
+#
+# The brultech devices record only a cumulative measure - the total number of
+# watt-seconds, which is a measure of energy. From this there are two measures
+# typically desired - average power use (e.g. in Watts), and cumulative energy
+# use (e.g. in Kilowatt-Hours).
+#
+# Most services accept average measures of power and treat it as an
+# instantaneous power level. It is, in fact, the energy use over a period
+# of time divided by the period of time. The more often a device is sampled,
+# the more accurate the power readings will be; sampling infrequently will miss
+# spikes of energy use, but will not miss total energy use.
+#
+# Some services require a cumulative measure of energy. Others require a
+# differential measure of energy - how much energy consumed since the last
+# reading. This can be problematic when uploads fail, power goes out, or
+# other situations arise where data cannot be uploaded. The cumulative
+# reading is less error-prone in these situations.
+#
+# This implementation calculates power and energy as follows:
+#
+#   seconds = sec_counter1 - sec_counter0
+#   w1 = (abs_ws1 - abs_ws0) / seconds    (if pos_w1 == 0, multiply by -1)
+#   pos_w1 = (pol_ws1 - pol_ws0) / seconds
+#   neg_w1 = w1 - pos_w1
+#   pos_wh1 = pol_ws1 / 3600
+#   neg_wh1 = (abs_ws1 - pol_ws1) / 3600
+#   wh1 = pos_wh1 - neg_wh1    (same as (2*pol_ws1 - abs_ws1) / 3600)
+#   delta_wh1 = wh1 - wh0
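+#
+# For example, if a channel's counters advance from abs_ws0 = pol_ws0 =
+# 7,200,000 to abs_ws1 = pol_ws1 = 7,212,000 over a 10 second interval, then
+# w1 = 12000 / 10 = 1200W (all of it positive), wh1 = 7,212,000 / 3600 =
+# 2003.33Wh, and delta_wh1 = 2003.33 - 2000.00 = 3.33Wh - the 12000
+# watt-seconds of the interval expressed as watt-hours.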
+#
+# database schemas
+#
+# The Configurator classes will automatically create a database and table
+# with the appropriate schema. Database/table creation should happen
+# automatically the first time the application is run, but the configuration
+# can also be run explicitly.

+# TODO: use 'data' as name for every source in the rra to facilitate graphing
+# TODO: make reverse polarity the default so we match brultech convention
+# TODO: adjust processing logic to retry packets if upload failed
+# TODO: support both GEM and ECM on same serial or tcp/ip socket
+# TODO: parameterize sample period to automatically set buffer size and step
+# TODO: implement rrd as data source
+# TODO: figure out how to deal robustly with counter resets
+# TODO: check size of post/put/get and ensure size is not too big
+# TODO: bulk uploads for openenergymonitor, thingspeak if possible
+# TODO: use row-per-sensor database schema (more future-proof)
+
+MINUTE = 60
+SpH = 3600.0
+
+# if set to 1, print out what would be uploaded but do not do the upload.
+SKIP_UPLOAD = 0
+
+# if set to 1, trust the device's clock
+TRUST_DEVICE_CLOCK = 0
+
+# if set to 1, assume timestamps from the device are in UTC
+DEVICE_CLOCK_IS_UTC = 0
+
+# if set to 1, obfuscate any serial number before uploading
+OBFUSCATE_SERIALS = 1
+
+# brultech assumes that positive polarized watt-seconds means negative energy,
+# resulting in this calculation for watt-seconds:
+#   ws = aws - 2*pws
+# btmon assumes that positive polarized watt-seconds means positive energy,
+# resulting in this calculation for watt-seconds:
+#   ws = 2*pws - aws
+# when reverse polarity is specified, the sign is inverted for each channel
+# that has a polarized watt-second counter so that the power and energy values
+# match those of brultech software.
+REVERSE_POLARITY = 0
+
+# the gem includes an option to include current values in packets. if this is
+# enabled in the gem, then enable it here to process the current values.
+INCLUDE_CURRENT = 0 + +# number of retries to attempt when reading device, 0 means retry forever +READ_RETRIES = 0 + +# how long to wait after a failure before attempting to read again, in seconds +RETRY_WAIT = 0 + +# number of retries to attempt when polling device +POLL_RETRIES = 3 + +# size of the rolling buffer into which data are cached +# should be at least max(upload_period) / sample_period +# a sample period of 10s and max upload period of 15m (900s), with contingency +# of 5m (300s) server/network downtime yields a buffer of 120 +DEFAULT_BUFFER_SIZE = 120 + +# how long to wait before considering an upload to have failed, in seconds +DEFAULT_UPLOAD_TIMEOUT = 15 + +# how often to upload data, in seconds +# this may be overridden by specific services +DEFAULT_UPLOAD_PERIOD = 15 * MINUTE + +# device types +DEV_ECM1220 = 'ecm1220' +DEV_ECM1240 = 'ecm1240' +DEV_GEM = 'gem' +DEVICE_TYPES = [DEV_ECM1220, DEV_ECM1240, DEV_GEM] +DEFAULT_DEVICE_TYPE = DEV_ECM1240 + +# packet formats +PF_ECM1220BIN = 'ecm1220bin' # ECM-1220 binary +PF_ECM1240BIN = 'ecm1240bin' # ECM-1240 binary +PF_GEM48PTBIN = 'gem48ptbin' # GEM 48-channel binary with polarity, timestamp +PF_GEM48PBIN = 'gem48pbin' # GEM 48-channel binary with polarity +PACKET_FORMATS = [PF_ECM1220BIN, PF_ECM1240BIN, PF_GEM48PTBIN, PF_GEM48PBIN] + +# the database schema +DB_SCHEMA_COUNTERS = 'counters' # just the counters and sensor readings +DB_SCHEMA_ECMREAD = 'ecmread' # basic format used by ecmread +DB_SCHEMA_ECMREADEXT = 'ecmreadext' # extended format used by ecmread +DB_SCHEMAS = [DB_SCHEMA_COUNTERS, DB_SCHEMA_ECMREAD, DB_SCHEMA_ECMREADEXT] +DEFAULT_DB_SCHEMA = DB_SCHEMA_ECMREAD + +# channel filters +FILTER_PE_LABELS = 'pelabels' +FILTER_CURRENT = 'current' +FILTER_POWER = 'power' +FILTER_ENERGY = 'energy' +FILTER_PULSE = 'pulse' +FILTER_SENSOR = 'sensor' +FILTER_DB_SCHEMA_COUNTERS = DB_SCHEMA_COUNTERS +FILTER_DB_SCHEMA_ECMREAD = DB_SCHEMA_ECMREAD +FILTER_DB_SCHEMA_ECMREADEXT = DB_SCHEMA_ECMREADEXT + +# serial settings +# the serial port to which device is connected e.g. 
COM4, /dev/ttyS01 +SERIAL_PORT = '/dev/ttyUSB0' +SERIAL_BAUD = 19200 +SERIAL_BUFFER_SIZE = 2048 + +# ethernet settings +# the etherbee defaults to pushing data to port 8083 +# the wiz110rs defaults to listening on port 5000 +IP_SERVER_HOST = '' # bind to default +IP_SERVER_PORT = 8083 +IP_CLIENT_PORT = 5000 +IP_CLIENT_TIMEOUT = 60 +IP_DEFAULT_MODE = 'server' +IP_BUFFER_SIZE = 2048 + +# database defaults +DB_HOST = 'localhost' +DB_USER = 'ecmuser' +DB_PASSWD = 'ecmpass' +DB_DATABASE = 'ecm' +DB_FILENAME = 'ecm.db' # filename for sqlite databases +DB_INSERT_PERIOD = MINUTE # how often to record to database, in seconds +DB_POLL_INTERVAL = 60 # how often to poll the database, in seconds + +# rrd defaults +RRD_DIR = 'rrd' # directory in which to put the rrd files +RRD_STEP = 10 # how often we get samples from the device, in seconds +RRD_HEARTBEAT = 2 * RRD_STEP # typically twice the step, in seconds +# 10s, 5m, 30m, 1h +# 4d at 10s, 60d at 5m, 365d at 30m, 730d at 1h +# 2087836 bytes per rrd file - about 90MB for a gem or 20MB for an ecm1240 +RRD_STEPS = [1, 18, 180, 360] +RRD_RESOLUTIONS = [34560, 17280, 17520, 17520] +# 10s, 1m, 10m, 20m +# 32h at 10s, 12d at 1m, 121.6d at 10m, 243.3d at 20m +# 30s, 5m, 30m, 1h +# 4d at 30s, 60d at 5m, 365d at 30m, 730d at 1h +# 1534876 bytes per rrd file - about 75MB for a gem or 15MB for an ecm1240 +#RRD_STEPS = [1,6,60,120] +#RRD_RESOLUTIONS = [11520, 17280, 17520, 17520] +RRD_UPDATE_PERIOD = 60 # how often to update the rrd files, in seconds +RRD_POLL_INTERVAL = 120 # how often to poll when rrd is source, in seconds + +# WattzOn defaults +# the map is a comma-delimited list of channel,meter pairs. for example: +# 311111_ch1,living room,311112_ch1,parlor,311112_aux4,kitchen +WATTZON_API_URL = 'http://www.wattzon.com/api/2009-01-27/3' +WATTZON_UPLOAD_PERIOD = 5 * MINUTE +WATTZON_TIMEOUT = 15 # seconds +WATTZON_MAP = '' +WATTZON_API_KEY = 'apw6v977dl204wrcojdoyyykr' +WATTZON_USER = '' +WATTZON_PASS = '' + +# PlotWatt defaults +# https://plotwatt.com/docs/api +# Recommended upload period is one minute to a few minutes. Recommended +# sampling as often as possible, no faster than once per second. +# the map is a comma-delimited list of channel,meter pairs. for example: +# 311111_ch1,1234,311112_ch1,1235,311112_aux4,1236 +PLOTWATT_BASE_URL = 'http://plotwatt.com' +PLOTWATT_UPLOAD_URL = '/api/v2/push_readings' +PLOTWATT_UPLOAD_PERIOD = MINUTE +PLOTWATT_TIMEOUT = 15 # seconds +PLOTWATT_MAP = '' +PLOTWATT_HOUSE_ID = '' +PLOTWATT_API_KEY = '' + +# EnerSave defaults +# Minimum upload interval is 60 seconds. +# Recommended sampling interval is 2 to 30 seconds. +# the map is a comma-delimited list of channel,description,type tuples +# 311111_ch1,living room,2,311112_ch2,solar,1,311112_aux4,kitchen,2 +ES_URL = 'http://data.myenersave.com/fetcher/data' +ES_UPLOAD_PERIOD = 5 * MINUTE +ES_TIMEOUT = 60 # seconds +ES_TOKEN = '' +ES_MAP = '' +ES_DEFAULT_TYPE = 2 + +# Bidgely defaults +# Minimum upload interval is 60 seconds. +# Recommended sampling interval is 2 to 30 seconds. +# the map is a comma-delimited list of channel,description,type tuples +# 311111_ch1,living room,2,311112_ch2,solar,1,311112_aux4,kitchen,2 +BY_UPLOAD_PERIOD = 60 # seconds +BY_TIMEOUT = 60 # seconds +BY_MAP = '' +BY_DEFAULT_TYPE = 10 + +# PeoplePower defaults +# http://developer.peoplepowerco.com/docs +# Recommended upload period is 15 minutes. +# the map is a comma-delimited list of channel,meter pairs. 
for example: +# 311111_ch1,1111c1,311112_ch1,1112c1,311112_aux4,1112a4 +PPCO_URL = 'https://esp.peoplepowerco.com:8443/deviceio/ml' +PPCO_UPLOAD_PERIOD = 15 * MINUTE +PPCO_TIMEOUT = 15 # seconds +PPCO_TOKEN = '' +PPCO_HUBID = 1 +PPCO_MAP = '' +PPCO_FIRST_NONCE = 1 +PPCO_DEVICE_TYPE = 1005 +PPCO_ADD_DEVICES = True # set to false to skip setup if setup already done + +# eragy defaults +ERAGY_URL = 'http://d.myeragy.com/energyremote.aspx' +ERAGY_UPLOAD_PERIOD = MINUTE +ERAGY_TIMEOUT = 15 # seconds +ERAGY_GATEWAY_ID = '' +ERAGY_TOKEN = '' + +# smart energy groups defaults +# http://smartenergygroups.com/api +# the map is a comma-delimited list of channel,meter pairs. for example: +# 311111_ch1,living room,311112_ch1,parlor,311112_aux4,kitchen +SEG_URL = 'http://api.smartenergygroups.com/api_sites/stream' +SEG_UPLOAD_PERIOD = MINUTE +SEG_TIMEOUT = 15 # seconds +SEG_TOKEN = '' +SEG_MAP = '' + +# thingspeak defaults +# http://community.thingspeak.com/documentation/api/ +# Uploads are limited to no more than every 15 seconds per channel. +TS_URL = 'http://api.thingspeak.com/update' +TS_UPLOAD_PERIOD = MINUTE +TS_TIMEOUT = 15 # seconds +TS_TOKENS = '' +TS_FIELDS = '' + +# pachube/cosm defaults +# https://cosm.com/docs/v2/ +PBE_URL = 'http://api.xively.com/v2/feeds' +PBE_UPLOAD_PERIOD = MINUTE +PBE_TIMEOUT = 15 # seconds +PBE_TOKEN = '' +PBE_FEED = '' + +# open energy monitor emoncms defaults +OEM_URL = 'https://localhost/emoncms/api/post' +OEM_UPLOAD_PERIOD = MINUTE +OEM_TIMEOUT = 15 # seconds +OEM_TOKEN = '' +OEM_NODE = None + +# wattvision v0.2 defaults +# https://www.wattvision.com/usr/api +# post as fast as every 15 seconds or as slow as every 20 minutes. +WV_URL = 'http://www.wattvision.com/api/v0.2/elec' +WV_UPLOAD_PERIOD = 120 +WV_TIMEOUT = 15 # seconds +WV_API_ID = '' +WV_API_KEY = '' +WV_SENSOR_ID = '' +WV_CHANNEL = '' + +# pvoutput defaults +# http://www.pvoutput.org/help.html +# using the addstatus interface +# send a sample every 5 to 15 minutes +# use the cumulative flag for energy +PVO_URL = 'http://pvoutput.org/service/r2/addstatus.jsp' +PVO_UPLOAD_PERIOD = 300 +PVO_TIMEOUT = 15 # seconds +PVO_API_KEY = '' +PVO_SYSTEM_ID = '' +PVO_GEN_CHANNEL = '' +PVO_CON_CHANNEL = '' +PVO_TEMP_CHANNEL = '' + +# MQTT defaults +# Recommended upload period matches the sample period. If set slower +# than the sample period, batches of un-timestamped mqtt messages +# will be delivered to the mqtt broker. +# the map is a comma-delimited list of channel,topic pairs. for example: +# 399999_ch1_w,solar,399999_ch1_wh,solar_wh,399999_t1,garage +MQTT_HOST = 'localhost' +MQTT_PORT = 1883 +MQTT_CLIENTID = 'btmon' +MQTT_BASE_TOPIC = '/house/energy' +MQTT_QOS = 0 +MQTT_RETAIN = False +MQTT_MAP = '' +MQTT_UPLOAD_PERIOD = MINUTE + +# InfluxDB defaults +# Minimum upload interval is 60 seconds. +# Recommended sampling interval is 2 to 30 seconds. 
+INFLUXDB_HOST = 'localhost' +INFLUXDB_PORT = '8086' +INFLUXDB_UPLOAD_PERIOD = 1 * MINUTE +INFLUXDB_TIMEOUT = 60 # seconds +INFLUXDB_USERNAME = '' +INFLUXDB_PASSWORD = '' +INFLUXDB_DATABASE = '' +INFLUXDB_MEASUREMENT = '' +INFLUXDB_MODE = 'col' +INFLUXDB_MAP = '' +INFLUXDB_TAG_MAP = '' +INFLUXDB_DB_SCHEMA = FILTER_DB_SCHEMA_COUNTERS + +import base64 +import bisect +import calendar +import errno +import optparse +import socket +import os +import sys +import time +import traceback +import urllib +import urllib2 + +import warnings +warnings.filterwarnings('ignore', category=DeprecationWarning) # MySQLdb in 2.6 + +# External (Optional) Dependencies +try: + import serial +except ImportError: + serial = None + +try: + import MySQLdb +except ImportError: + MySQLdb = None + +try: + from sqlite3 import dbapi2 as sqlite +except ImportError: + sqlite = None + +try: + import rrdtool +except ImportError: + rrdtool = None + +try: + import cjson as json + # XXX: maintain compatibility w/ json module + setattr(json, 'dumps', json.encode) + setattr(json, 'loads', json.decode) +except ImportError: + try: + import simplejson as json + except ImportError: + import json + +try: + import ConfigParser +except ImportError: + ConfigParser = None + +try: + import paho.mqtt.publish as publish +except ImportError: + publish = None + +try: + from influxdb import InfluxDBClient +except ImportError: + InfluxDBClient = None + +class CounterResetError(Exception): + def __init__(self, msg): + self.msg = msg + def __repr__(self): + return repr(self.msg) + def __str__(self): + return self.msg + +class ReadError(Exception): + def __init__(self, msg): + self.msg = msg + def __repr__(self): + return repr(self.msg) + def __str__(self): + return self.msg + +class EmptyReadError(Exception): + def __init__(self, msg): + self.msg = msg + def __repr__(self): + return repr(self.msg) + def __str__(self): + return self.msg + +class RetriesExceededError(Exception): + def __init__(self, n): + self.msg = 'exceeded maximum number of %d retries' % n + def __repr__(self): + return repr(self.msg) + def __str__(self): + return self.msg + +# logging and error reporting +# +# note that setting the log level to debug will affect the application +# behavior, especially when sampling the serial line, as it changes the +# timing of read operations. 
+LOG_ERROR = 0
+LOG_WARN = 1
+LOG_INFO = 2
+LOG_DEBUG = 3
+LOGLEVEL = LOG_INFO
+
+def dbgmsg(msg):
+    if LOGLEVEL >= LOG_DEBUG:
+        logmsg(msg)
+
+def infmsg(msg):
+    if LOGLEVEL >= LOG_INFO:
+        logmsg(msg)
+
+def wrnmsg(msg):
+    if LOGLEVEL >= LOG_WARN:
+        logmsg(msg)
+
+def errmsg(msg):
+    if LOGLEVEL >= LOG_ERROR:
+        logmsg(msg)
+
+def logmsg(msg):
+    ts = fmttime(time.localtime())
+    print "%s %s" % (ts, msg)
+
+# Helper Functions
+
+def fmttime(seconds):
+    return time.strftime("%Y/%m/%d %H:%M:%S", seconds)
+
+def mkts(seconds):
+    return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(seconds))
+
+def getgmtime():
+    return int(time.time())
+
+def cleanvalue(s):
+    """ensure that values read from configuration file are sane"""
+    s = s.replace('\n', '')    # we never want newlines
+    s = s.replace('\r', '')    # or carriage returns
+    if s.lower() == 'false':
+        s = False
+    elif s.lower() == 'true':
+        s = True
+    return s
+
+def pairs2dict(s):
+    """convert comma-delimited name,value pairs to a dictionary"""
+    items = s.split(',')
+    m = dict()
+    for k, v in zip(items[::2], items[1::2]):
+        m[k] = v
+    return m
+
+def obfuscate_serial(sn):
+    """obfuscate a serial number - expose only the last 3 digits"""
+    if OBFUSCATE_SERIALS:
+        n = len(sn)
+        s = 'XXX%s' % sn[n-3:n]
+    else:
+        s = sn
+    return s
+
+def mklabel(sn, channel):
+    return '%s_%s' % (sn, channel)
+
+def mkfn(dir, label):
+    label = label.replace(' ','_')
+    label = label.replace(':','_')
+    label = label.replace('/','_')
+    return '%s/%s.rrd' % (dir, label)
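+
+# A few illustrations of these helper conventions (values are examples):
+#   pairs2dict('311111_ch1,123,311111_ch2,124') returns
+#       {'311111_ch1': '123', '311111_ch2': '124'}
+#   obfuscate_serial('355123') returns 'XXX123' when OBFUSCATE_SERIALS is 1
+#   mklabel('355123', 'ch1') returns '355123_ch1'
+#   mkfn('/var/btmon/rrd', '355123_ch1') returns '/var/btmon/rrd/355123_ch1.rrd'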
+
+
+# The device class encapsulates device behaviors such as the default packet
+# format and default device addressing mechanism for multiplexed devices.
+
+class BaseDevice(object):
+    def __init__(self):
+        pass
+
+    def extract_identifiers(self, s):
+        return s.split(',')
+
+    def numpackets(self, packetformat):
+        return 1
+
+class ECM1220Device(BaseDevice):
+    def __init__(self):
+        super(ECM1220Device, self).__init__()
+        self.NAME = DEV_ECM1220
+        self.DEFAULT_DEVICE_LIST = 'fc'
+        self.DEFAULT_PACKET_FORMAT = PF_ECM1220BIN
+        self.CFM_BYTE = chr(0xfc)
+
+    # ensure that each identifier is one of fc, fd, fe, or ff
+    def check_identifiers(self, s):
+        for d in s.split(','):
+            if not (d == 'fc' or d == 'fd' or d == 'fe' or d == 'ff'):
+                raise Exception("bogus device '%s' in list '%s'" % (d, s))
+
+    # each ecm1220 responds to the byte fc (and others?)
+    # each ecm1240 responds to one of fc, fd, fe, or ff
+    def requestpacket(self, com, ecmid='fc'):
+        com.send(chr(int(ecmid, 16)))
+        self._confirm(com)
+        com.send('SPK')
+        self._confirm(com)
+
+    def _confirm(self, com):
+        resp = com.recv(1)
+        if not resp:
+            raise Exception('received empty response')
+        if not resp == self.CFM_BYTE:
+            raise Exception('wrong response %s, expected %s' %
+                            (hex(ord(resp)), hex(ord(self.CFM_BYTE))))
+
+class ECM1240Device(ECM1220Device):
+    def __init__(self):
+        super(ECM1240Device, self).__init__()
+        self.NAME = DEV_ECM1240
+        self.DEFAULT_DEVICE_LIST = 'fc'
+        self.DEFAULT_PACKET_FORMAT = PF_ECM1240BIN
+
+class GEMDevice(BaseDevice):
+    def __init__(self):
+        super(GEMDevice, self).__init__()
+        self.NAME = DEV_GEM
+        self.DEFAULT_DEVICE_LIST = ''
+        self.DEFAULT_PACKET_FORMAT = PF_GEM48PTBIN
+
+    # ensure that each identifier is a 5 digit string
+    def check_identifiers(self, s):
+        for d in s.split(','):
+            if len(d) > 0 and not len(d) == 5:
+                raise Exception("bogus device '%s' in list '%s'" % (d, s))
+
+    # use the NMBXXXXX identifier for specific serial numbers. based on
+    # section 8 of 'GEM Commands and Packet Format6.doc' from october 2012.
+    # multiplexed devices work only with GEM com firmware 1.61 or later.
+    def requestpacket(self, com, gemid=''):
+        idstr = ''
+        if len(gemid) > 0:
+            idstr = 'NMB%s' % gemid
+        com.send('^^^%sAPISPK' % idstr)
+
+    # when sending data in the ecm1240 binary format, the GEM sends 5 virtual
+    # packets, each with a different serial number.
+    def numpackets(self, packetformat):
+        if packetformat == PF_ECM1240BIN:
+            return 5
+        return 1
+
+
+# The packet class encapsulates packet characteristics, including how to read
+# packet information.
+
+class BasePacket(object):
+    def __init__(self):
+        self.KEEPALIVE_GEM = 36
+        self.KEEPALIVE_ECM = 0
+        self.SEC_COUNTER_MAX = 16777216        # 256^3
+        self.BYTE4_COUNTER_MAX = 4294967296    # 256^4
+        self.BYTE5_COUNTER_MAX = 1099511627776 # 256^5
+        self.START_HEADER0 = 254
+        self.START_HEADER1 = 255
+        self.END_HEADER0 = 255
+        self.END_HEADER1 = 254
+        self.DATA_BYTES_LENGTH = 0  # must be defined by derived class
+        self.PACKET_ID = 0          # must be defined by derived class
+
+    def _convert(self, b):
+        return reduce(lambda x,y:x+y[0] * (256**y[1]), zip(b, xrange(len(b))), 0)
+
+    def _serialraw(self, packet):
+        """extract the serial number from a raw packet"""
+        return '0000'
+
+    def _getserial(self, packet):
+        """get the serial number from a compiled packet"""
+        return self._fmtserial(packet['unit_id'], packet['ser_no'])
+
+    def _fmtserial(self, devid, sn):
+        return '%03d%05d' % (devid, sn)
+
+    def _calculate_checksum(self, packet, devid):
+        """calculate the packet checksum"""
+        checksum = self.START_HEADER0
+        checksum += self.START_HEADER1
+        checksum += devid
+        checksum += sum(packet)
+        checksum += self.END_HEADER0
+        checksum += self.END_HEADER1
+        return checksum & 0xff
+
+    def _checkbyte(self, data, label, evalue):
+        if not data:
+            raise EmptyReadError("expected %s %s, got nothing" %
+                                 (label, hex(evalue)))
+        b = ord(data)
+        if b != evalue:
+            raise ReadError("expected %s %s, got %s" %
+                            (label, hex(evalue), hex(b)))
+
+    # for now (oct2012), this is the read method that we use
+    def _read1(self, collector, pktlen, pktid):
+        data = collector.readbytes(1)
+        self._checkbyte(data, 'START_HEADER0', self.START_HEADER0)
+        data = collector.readbytes(1)
+        self._checkbyte(data, 'START_HEADER1', self.START_HEADER1)
+
+        data = collector.readbytes(1)
+        self._checkbyte(data, 'PACKET_ID', pktid)
+
+        packet = ''
+        while len(packet) < pktlen:
+            data = collector.readbytes(pktlen - len(packet))
+            if not data:  # No data left
+                raise ReadError('no data after %d bytes' % len(packet))
+            packet += data
+
+        if len(packet) < pktlen:
+            raise ReadError("incomplete packet: expected %d bytes, got %d" %
+                            (pktlen, len(packet)))
+
+        data = collector.readbytes(1)
+        self._checkbyte(data, 'END_HEADER0', self.END_HEADER0)
+        data = collector.readbytes(1)
+        self._checkbyte(data, 'END_HEADER1', self.END_HEADER1)
+
+        pkt = [ord(c) for c in packet]
+
+        # if the checksum is incorrect, ignore the packet
+        checksum = self._calculate_checksum(pkt, pktid)
+        data = collector.readbytes(1)
+        b = ord(data)
+        if b != checksum:
+            raise ReadError("bad checksum for %s: expected %s, got %s" %
+                            (self._serialraw(packet), hex(checksum), hex(b)))
+
+        return [pkt]
+
+    def _calc_secs(self, newpkt, oldpkt):
+        ds = float(newpkt['secs'] - oldpkt['secs'])
+        if newpkt['secs'] < oldpkt['secs']:
+            ds += self.SEC_COUNTER_MAX
+            wrnmsg('seconds counter wraparound detected')
+        return ds
+
+    # Absolute watt counter increases no matter which way the current goes
+    # Polarized watt counter only increases if the current is positive
+    # Every polarized
count registers as an absolute count + def _calc_pe(self, tag, ds, ret, prev): + """calculate power and energy for a 5-byte polarized counter""" + + # FIXME: there is a corner case here when the absolute watt-second + # counter wraps but the polarized watt-second counter does not, or + # vice-versa. + + # Detect counter wraparound and deal with it + daws = ret[tag+'_aws'] - prev[tag+'_aws'] + if ret[tag+'_aws'] < prev[tag+'_aws']: + daws += self.BYTE5_COUNTER_MAX + wrnmsg('energy counter wraparound detected for %s' % tag) + dpws = ret[tag+'_pws'] - prev[tag+'_pws'] + if ret[tag+'_pws'] < prev[tag+'_pws']: + dpws += self.BYTE5_COUNTER_MAX + wrnmsg('polarized energy counter wraparound detected for %s' % tag) + + # Calculate average power over the time period + ret[tag+'_w'] = daws / ds + pw = dpws / ds + nw = ret[tag+'_w'] - pw + + if REVERSE_POLARITY: + ret[tag+'_pw'] = nw + ret[tag+'_nw'] = pw + else: + ret[tag+'_pw'] = pw + ret[tag+'_nw'] = nw + + # The polarized count goes up only if the sign is positive, so use the + # value of polarized count to determine the sign of overall watts + if (ret[tag+'_pw'] == 0): + ret[tag+'_w'] *= -1 + + # calculate the watt-hour count and delta + pwh = ret[tag+'_pws'] / SpH + nwh = (ret[tag+'_aws'] - ret[tag+'_pws']) / SpH + prev_dwh = 0 + if REVERSE_POLARITY: + ret[tag+'_pwh'] = nwh + ret[tag+'_nwh'] = pwh + prev_dwh = (prev[tag+'_aws'] - 2*prev[tag+'_pws']) / SpH + else: + ret[tag+'_pwh'] = pwh + ret[tag+'_nwh'] = nwh + prev_dwh = (2*prev[tag+'_pws'] - prev[tag+'_aws']) / SpH + ret[tag+'_wh'] = ret[tag+'_pwh'] - ret[tag+'_nwh'] + ret[tag+'_dwh'] = ret[tag+'_wh'] - prev_dwh + + def _calc_pe_4byte(self, tag, ds, ret, prev): + """calculate power and energy for a 4-byte non-polarized counter""" + + dws = ret[tag+'_ws'] - prev[tag+'_ws'] + if ret[tag+'_ws'] < prev[tag+'_ws']: + dws += self.BYTE4_COUNTER_MAX + wrnmsg('energy counter wraparound detected for %s' % tag) + + ret[tag+'_w'] = dws / ds + ret[tag+'_wh'] = ret[tag+'_ws'] / SpH + ret[tag+'_dwh'] = dws / SpH + + def channels(self, fltr): + """return a list of data sources for this packet type""" + return [] + + def read(self, collector): + """read data from collector, return an array of raw packets""" + return self._read1(collector, self.DATA_BYTES_LENGTH, self.PACKET_ID) + + def compile(self, packet): + """convert a raw packet into a compiled packet""" + pass + + def calculate(self, newer, older): + """calculate difference between two packets""" + pass + + def printPacket(self, packet): + pass + +class ECMBinaryPacket(BasePacket): + def __init__(self): + BasePacket.__init__(self) + + def _getresetcounter(self, b): + """extract the reset counter from a byte""" + return b & 0x07 # 0b00000111 is not recognized by python 2.5 + + def _serialraw(self, packet): + sn1 = ord(packet[26:27]) + sn2 = ord(packet[27:28]) * 256 + id1 = ord(packet[29:30]) + return self._fmtserial(id1, sn1 + sn2) + + def _fmtserial(self, ecmid, sn): + """ECM serial numbers are 6 characters - unit id then serial""" + s = '%d' % ecmid + return s[-1:] + '%05d' % sn + +class ECM1220BinaryPacket(ECMBinaryPacket): + def __init__(self): + ECMBinaryPacket.__init__(self) + self.NAME = PF_ECM1220BIN + self.PACKET_ID = 1 + self.DATA_BYTES_LENGTH = 37 # does not include the start/end headers + self.NUM_CHAN = 2 + + def channels(self, fltr): + c = [] + if fltr == FILTER_PE_LABELS: + c = ['ch1', 'ch2'] + elif fltr == FILTER_POWER: + c = ['ch1_w', 'ch2_w'] + elif fltr == FILTER_ENERGY: + c = ['ch1_wh', 'ch2_wh'] + elif fltr == 
FILTER_DB_SCHEMA_COUNTERS: + c = ['volts', 'ch1_a', 'ch2_a', 'ch1_aws', 'ch2_aws', 'ch1_pws', 'ch2_pws'] + return c + + def compile(self, rpkt): + """compile a raw packet into a compiled packet""" + cpkt = dict() + + # Voltage Data (2 bytes) + cpkt['volts'] = 0.1 * self._convert(rpkt[1::-1]) + + # CH1-2 Absolute Watt-Second Counter (5 bytes each) + cpkt['ch1_aws'] = self._convert(rpkt[2:7]) + cpkt['ch2_aws'] = self._convert(rpkt[7:12]) + + # CH1-2 Polarized Watt-Second Counter (5 bytes each) + cpkt['ch1_pws'] = self._convert(rpkt[12:17]) + cpkt['ch2_pws'] = self._convert(rpkt[17:22]) + + # Reserved (4 bytes) + + # Device Serial Number (2 bytes) + cpkt['ser_no'] = self._convert(rpkt[26:28]) + + # Reset and Polarity Information (1 byte) + cpkt['flag'] = self._convert(rpkt[28:29]) + + # Device Information (1 byte) + cpkt['unit_id'] = self._convert(rpkt[29:30]) + + # CH1-2 Current (2 bytes each) + cpkt['ch1_a'] = 0.01 * self._convert(rpkt[30:32]) + cpkt['ch2_a'] = 0.01 * self._convert(rpkt[32:34]) + + # Seconds (3 bytes) + cpkt['secs'] = self._convert(rpkt[34:37]) + + # Use the current time as the timestamp + cpkt['time_created'] = getgmtime() + + # Add a formatted serial number + cpkt['serial'] = self._getserial(cpkt) + + return cpkt + + def calculate(self, now, prev): + """calculate watts and watt-hours from watt-second counters""" + + # if reset counter has changed since last packet, skip the calculation + c0 = self._getresetcounter(prev['flag']) + c1 = self._getresetcounter(now['flag']) + if c1 != c0: + raise CounterResetError("old: %d new: %d" % (c0, c1)) + + ret = now + ds = self._calc_secs(ret, prev) + self._calc_pe('ch1', ds, ret, prev) + self._calc_pe('ch2', ds, ret, prev) + + return ret + + def printPacket(self, p): + ts = fmttime(time.localtime(p['time_created'])) + + print ts+": Serial: %s" % p['serial'] + print ts+": Counter: %d" % self._getresetcounter(p['flag']) + print ts+": Voltage: %9.2fV" % p['volts'] + for x in range(1, self.NUM_CHAN + 1): + print ts+": Ch%d Current: %9.2fA" % (x, p['ch%d_a' % x]) + for x in range(1, self.NUM_CHAN + 1): + print ts+": Ch%d Watts: % 13.6fKWh (% 5dW)" % (x, p['ch%d_wh' % x]/1000, p['ch%d_w' % x]) + print ts+": Ch%d Positive Watts: % 13.6fKWh (% 5dW)" % (x, p['ch%d_pwh' % x]/1000, p['ch%d_pw' % x]) + print ts+": Ch%d Negative Watts: % 13.6fKWh (% 5dW)" % (x, p['ch%d_nwh' % x]/1000, p['ch%d_nw' % x]) + + +class ECM1240BinaryPacket(ECM1220BinaryPacket): + def __init__(self): + ECM1220BinaryPacket.__init__(self) + self.NAME = PF_ECM1240BIN + self.PACKET_ID = 3 + self.DATA_BYTES_LENGTH = 59 # does not include the start/end headers + self.NUM_CHAN = 2 + self.NUM_AUX = 5 + + def channels(self, fltr): + c = [] + if fltr == FILTER_PE_LABELS: + c = ['ch1', 'ch2', 'aux1', 'aux2', 'aux3', 'aux4', 'aux5'] + elif fltr == FILTER_POWER: + c = ['ch1_w', 'ch2_w', 'aux1_w', 'aux2_w', 'aux3_w', 'aux4_w', 'aux5_w'] + elif fltr == FILTER_ENERGY: + c = ['ch1_wh', 'ch2_wh', 'aux1_wh', 'aux2_wh', 'aux3_wh', 'aux4_wh', 'aux5_wh'] + elif fltr == FILTER_DB_SCHEMA_ECMREAD: + c = ['volts', 'ch1_amps', 'ch2_amps', 'ch1_w', 'ch2_w', 'aux1_w', 'aux2_w', 'aux3_w', 'aux4_w', 'aux5_w'] + elif fltr == FILTER_DB_SCHEMA_ECMREADEXT: + c = ['volts', 'ch1_a', 'ch2_a', 'ch1_w', 'ch2_w', 'aux1_w', 'aux2_w', 'aux3_w', 'aux4_w', 'aux5_w', 'ch1_wh', 'ch2_wh', 'aux1_wh', 'aux2_wh', 'aux3_wh', 'aux4_wh', 'aux5_wh', 'ch1_whd', 'ch2_whd', 'aux1_whd', 'aux2_whd', 'aux3_whd', 'aux4_whd', 'aux5_whd', 'ch1_pw', 'ch1_nw', 'ch2_pw', 'ch2_nw', 'ch1_pwh', 'ch1_nwh', 'ch2_pwh', 'ch2_nwh'] + elif fltr 
== FILTER_DB_SCHEMA_COUNTERS: + c = ['volts', 'ch1_a', 'ch2_a', 'ch1_aws', 'ch2_aws', 'ch1_pws', 'ch2_pws', 'aux1_ws', 'aux2_ws', 'aux3_ws', 'aux4_ws', 'aux5_ws', 'aux5_volts'] + return c + + def compile(self, rpkt): + cpkt = ECM1220BinaryPacket.compile(self, rpkt) + + # AUX1-5 Watt-Second Counter (4 bytes each) + cpkt['aux1_ws'] = self._convert(rpkt[37:41]) + cpkt['aux2_ws'] = self._convert(rpkt[41:45]) + cpkt['aux3_ws'] = self._convert(rpkt[45:49]) + cpkt['aux4_ws'] = self._convert(rpkt[49:53]) + cpkt['aux5_ws'] = self._convert(rpkt[53:57]) + + # DC voltage on AUX5 (2 bytes) + cpkt['aux5_volts'] = self._convert(rpkt[57:59]) + + return cpkt + + def calculate(self, now, prev): + ret = ECM1220BinaryPacket.calculate(self, now, prev) + ds = self._calc_secs(ret, prev) + self._calc_pe_4byte('aux1', ds, ret, prev) + self._calc_pe_4byte('aux2', ds, ret, prev) + self._calc_pe_4byte('aux3', ds, ret, prev) + self._calc_pe_4byte('aux4', ds, ret, prev) + self._calc_pe_4byte('aux5', ds, ret, prev) + + return ret + + def printPacket(self, p): + ts = fmttime(time.localtime(p['time_created'])) + + print ts+": Serial: %s" % p['serial'] + print ts+": Counter: %d" % self._getresetcounter(p['flag']) + print ts+": Voltage: %9.2fV" % p['volts'] + for x in range(1, self.NUM_CHAN + 1): + print ts+": Ch%d Current: %9.2fA" % (x, p['ch%d_a' % x]) + for x in range(1, self.NUM_CHAN + 1): + print ts+": Ch%d Watts: % 13.6fKWh (% 5dW)" % (x, p['ch%d_wh' % x]/1000, p['ch%d_w' % x]) + print ts+": Ch%d Positive Watts: % 13.6fKWh (% 5dW)" % (x, p['ch%d_pwh' % x]/1000, p['ch%d_pw' % x]) + print ts+": Ch%d Negative Watts: % 13.6fKWh (% 5dW)" % (x, p['ch%d_nwh' % x]/1000, p['ch%d_nw' % x]) + for x in range(1, self.NUM_AUX + 1): + print ts+": Aux%d Watts: % 13.6fKWh (% 5dW)" % (x, p['aux%d_wh' % x]/1000, p['aux%d_w' % x]) + + +# GEM binary packet with 48 channels, polarization +class GEM48PBinaryPacket(BasePacket): + def __init__(self): + BasePacket.__init__(self) + self.NAME = PF_GEM48PBIN + self.PACKET_ID = 5 + self.DATA_BYTES_LENGTH = 613 # does not include the start/end headers + self.NUM_CHAN = 32 # there are 48 channels, but only 32 usable + self.NUM_SENSE = 8 + self.NUM_PULSE = 4 + + def _serialraw(self, packet): + sn1 = ord(packet[481:482]) + sn2 = ord(packet[482:483]) * 256 + id1 = ord(packet[485:486]) + return self._fmtserial(id1, sn1 + sn2) + + def _fmtserial(self, gemid, sn): + """GEM serial numbers are 8 characters - unit id then serial""" + return "%03d%05d" % (gemid, sn) + + def _mktemperature(self, b): + # firmware 1.61 and older use this for temperature +# t = 0.5 * self._convert(b) + + # firmware later than 1.61 uses this for temperature + t = 0.5 * ((b[1] & 0x7f) << 8 | b[0]) + if (b[1] >> 7) != 0: + t = -t + + # check for bogus values that indicate no sensor + if t > 255: + t = 0 # should be None, but currently no notion of 'no data' + return t + + # for now we emit only the first 32 channels. the additional 16 are not + # yet accessible. 
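+    # for example, channels(FILTER_PE_LABELS) yields the labels 'ch1'
+    # through 'ch32', and the current, power, and energy filters yield the
+    # same labels with '_a', '_w', and '_wh' suffixes, respectively.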
+ def channels(self, fltr): + c = [] + if fltr == FILTER_PE_LABELS: + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d' % x) + elif fltr == FILTER_CURRENT: + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d_a' % x) + elif fltr == FILTER_POWER: + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d_w' % x) + elif fltr == FILTER_ENERGY: + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d_wh' % x) + elif fltr == FILTER_PULSE: + for x in range(1, self.NUM_PULSE + 1): + c.append('p%d' % x) + elif fltr == FILTER_SENSOR: + for x in range(1, self.NUM_SENSE + 1): + c.append('t%d' % x) + elif fltr == FILTER_DB_SCHEMA_COUNTERS: + c = ['volts'] + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d_aws' % x) + c.append('ch%d_pws' % x) + for x in range(1, self.NUM_PULSE + 1): + c.append('p%d' % x) + for x in range(1, self.NUM_SENSE + 1): + c.append('t%d' % x) + elif fltr == FILTER_DB_SCHEMA_ECMREAD: + c = ['volts'] + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d_a' % x) + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d_w' % x) + elif fltr == FILTER_DB_SCHEMA_ECMREADEXT: + c = ['volts'] + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d_a' % x) + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d_w' % x) + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d_wh' % x) + for x in range(1, self.NUM_CHAN + 1): + c.append('ch%d_dwh' % x) + for x in range(1, self.NUM_PULSE + 1): + c.append('p%d' % x) + for x in range(1, self.NUM_SENSE + 1): + c.append('t%d' % x) + + return c + + def compile(self, rpkt): + cpkt = dict() + + # Voltage Data (2 bytes) + cpkt['volts'] = 0.1 * self._convert(rpkt[1::-1]) + + # Absolute/Polarized Watt-Second Counters (5 bytes each) + for x in range(1, self.NUM_CHAN+1): + cpkt['ch%d_aws' % x] = self._convert(rpkt[2+5*(x-1):2+5*x]) + cpkt['ch%d_pws' % x] = self._convert(rpkt[242+5*(x-1):242+5*x]) + + # Device Serial Number (2 bytes) + cpkt['ser_no'] = self._convert(rpkt[483:481:-1]) + + # Reserved (1 byte) + + # Device Information (1 byte) + cpkt['unit_id'] = self._convert(rpkt[485:486]) + + # Current (2 bytes each) + if INCLUDE_CURRENT: + for x in range(1, self.NUM_CHAN+1): + cpkt['ch%d_a' % x] = 0.02 * self._convert(rpkt[486+2*(x-1):486+2*x]) + + # Seconds (3 bytes) + cpkt['secs'] = self._convert(rpkt[582:585]) + + # Pulse Counters (3 bytes each) + for x in range(1, self.NUM_PULSE + 1): + cpkt['p%d' % x] = self._convert(rpkt[585+3*(x-1):585+3*x]) + + # One-Wire Sensors (2 bytes each) + # the 0.5 multiplier is for DS18B20 sensors +# for x in range(1,self.NUM_SENSE+1): +# cpkt['t%d' % x] = 0.5 * self._convert(rpkt[597+2*(x-1):597+2*x]) + for x in range(1, self.NUM_SENSE + 1): + cpkt['t%d' % x] = self._mktemperature(rpkt[597+2*(x-1):597+2*x]) + + # Footer (2 bytes) + + # Add the current time as the timestamp + cpkt['time_created'] = getgmtime() + + # Add a formatted serial number + cpkt['serial'] = self._getserial(cpkt) + + return cpkt + + def calculate(self, now, prev): + """calculate watts and watt-hours from watt-second counters""" + + # FIXME: check the reset flag once that is supported in gem packets + # until then, if counter drops we assume it is due to a reset + for x in range(1, self.NUM_CHAN + 1): + tag = 'ch%d' % x + c0 = prev[tag + '_aws'] + c1 = now[tag + '_aws'] + if c1 < c0: + raise CounterResetError("channel: %s old: %d new: %d" % + (tag, c0, c1)) + + ret = now + ds = self._calc_secs(ret, prev) + for x in range(1, self.NUM_CHAN + 1): + tag = 'ch%d' % x + self._calc_pe(tag, ds, ret, prev) + + return ret + + def 
printPacket(self, p): + ts = fmttime(time.localtime(p['time_created'])) + + print ts+": Serial: %s" % p['serial'] + print ts+": Voltage: % 6.2fV" % p['volts'] + for x in range(1, self.NUM_CHAN + 1): + if INCLUDE_CURRENT: + print ts+": Ch%02d: % 13.6fKWh (% 5dW) (% 7.2fA)" % (x, p['ch%d_wh' % x]/1000, p['ch%d_w' % x], p['ch%d_a' % x]) + else: + print ts+": Ch%02d: % 13.6fKWh (% 5dW)" % (x, p['ch%d_wh' % x]/1000, p['ch%d_w' % x]) + for x in range(1, self.NUM_PULSE + 1): + print ts+": p%d: % 15d" % (x, p['p%d' % x]) + for x in range(1, self.NUM_SENSE + 1): + print ts+": t%d: % 15.6f" % (x, p['t%d' % x]) + + +# GEM binary packet with 48 channels, polarization, time stamp +class GEM48PTBinaryPacket(GEM48PBinaryPacket): + def __init__(self): + GEM48PBinaryPacket.__init__(self) + self.NAME = PF_GEM48PTBIN + self.PACKET_ID = 5 + self.DATA_BYTES_LENGTH = 619 # does not include the start/end headers + self.USE_PACKET_TIMESTAMP = TRUST_DEVICE_CLOCK # trust the GEM clock? + + def compile(self, rpkt): + cpkt = GEM48PBinaryPacket.compile(self, rpkt) + + # Time Stamp (1 byte each) + cpkt['year'] = self._convert(rpkt[613:614]) + cpkt['mth'] = self._convert(rpkt[614:615]) + cpkt['day'] = self._convert(rpkt[615:616]) + cpkt['hr'] = self._convert(rpkt[616:617]) + cpkt['min'] = self._convert(rpkt[617:618]) + cpkt['sec'] = self._convert(rpkt[618:619]) + + # Add the timestamp as epoch + if self.USE_PACKET_TIMESTAMP: + tstr = '20%d.%d.%d %d:%d:%d' % ( + cpkt['year'], cpkt['mth'], cpkt['day'], + cpkt['hr'], cpkt['min'], cpkt['sec']) + time_tuple = time.strptime(tstr, '%Y.%m.%d %H:%M:%S') + if DEVICE_CLOCK_IS_UTC: + cpkt['time_created'] = int(calendar.timegm(time_tuple)) + else: + cpkt['time_created'] = int(time.mktime(time_tuple)) + + return cpkt + + +# The schema classes encapsulate the structure for saving data to and reading +# data from databases. 
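+#
+# as an illustration (abbreviated; an ECM-1240 source is assumed), the
+# ecmread schema below yields a table shaped like:
+#
+#   (id bigint primary key, time_created bigint, ecm_serial varchar(10),
+#    volts float, ch1_amps float, ch2_amps float,
+#    ch1_w float, ch2_w float, aux1_w float, ..., aux5_w float)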
+ +class BaseSchema(object): + def __init__(self): + pass + + def gettablesql(self, idopt=''): + return '' + + def db2pkt(self, row): + return dict() + + def getinsertsql(self, packet): + return '' + + def _gettype(self, channel): + if channel.endswith('_aws') or channel.endswith('_pws') or channel.endswith('_ws'): + return 'bigint' + if channel[0] == 'p': + return 'bigint' + if channel.endswith('volts') or channel[0] == 't' or channel.endswith('_a') or channel.endswith('_amps') or channel.endswith('_w') or channel.endswith('_wh') or channel.endswith('_pwh') or channel.endswith('_nwh') or channel.endswith('_pw') or channel.endswith('_nw') or channel.endswith('_whd'): + return 'float' + return 'bigint' + + +class CountersSchema(BaseSchema): + def __init__(self): + super(CountersSchema, self).__init__() + self.NAME = DB_SCHEMA_COUNTERS + + def gettablesql(self, idopt=''): + sql = [] + sql.append('(id bigint primary key %s' % idopt) + sql.append(', time_created bigint') + sql.append(', serial varchar(10)') + sql.append(', secs int') + for c in PACKET_FORMAT.channels(self.NAME): + sql.append(', %s %s' % (c, self._gettype(c))) + sql.append(')') + return ''.join(sql) + + def db2pkt(self, row): + pkt = dict(row) + pkt['flag'] = 0 # fake it + return pkt + + def getinsertsql(self, p): + labels = ['time_created', 'serial', 'secs'] + values = [str(p['time_created']), "'"+p['serial']+"'", str(p['secs'])] + for c in PACKET_FORMAT.channels(self.NAME): + labels.append(c) + values.append(str(p[c])) + sql = [] + sql.append('(') + sql.append(','.join(labels)) + sql.append(') VALUES (') + sql.append(','.join(values)) + sql.append(')') + return ''.join(sql), len(values), p['serial'], p['time_created'] + + +class ECMReadSchema(BaseSchema): + def __init__(self): + super(ECMReadSchema, self).__init__() + self.NAME = DB_SCHEMA_ECMREAD + + def gettablesql(self, idopt=''): + sql = [] + sql.append('(id bigint primary key %s' % idopt) + sql.append(', time_created bigint') + sql.append(', ecm_serial varchar(10)') + for c in PACKET_FORMAT.channels(self.NAME): + sql.append(', %s %s' % (c, self._gettype(c))) + sql.append(')') + return ''.join(sql) + + def db2pkt(self, row): + pkt = dict() + for key in row.keys(): + pktkey = key + if key.endswith('_amps'): + pktkey = key.replace('_amps', '_a') + if key == 'ecm_serial': + pktkey = 'serial' + pkt[pktkey] = row[key] + return pkt + + def getinsertsql(self, p): + labels = ['time_created', 'ecm_serial'] + values = [str(p['time_created']), "'"+p['serial']+"'"] + for c in PACKET_FORMAT.channels(self.NAME): + pktkey = c.replace('_amps', '_a') + labels.append(c) + values.append(str(p[pktkey])) + sql = [] + sql.append('(') + sql.append(','.join(labels)) + sql.append(') VALUES (') + sql.append(','.join(values)) + sql.append(')') + return ''.join(sql), len(values), p['serial'], p['time_created'] + + +class ECMReadExtSchema(BaseSchema): + def __init__(self): + super(ECMReadExtSchema, self).__init__() + self.NAME = DB_SCHEMA_ECMREADEXT + + def gettablesql(self, idopt=''): + sql = [] + sql.append('(id bigint primary key %s' % idopt) + sql.append(', time_created bigint') + sql.append(', ecm_serial varchar(10)') + for c in PACKET_FORMAT.channels(self.NAME): + sql.append(', %s %s' % (c, self._gettype(c))) + sql.append(')') + return ''.join(sql) + + def db2pkt(self, row): + pkt = dict() + for key in row.keys(): + pktkey = key + if key.endswith('_whd'): + pktkey = key.replace('_whd', '_dwh') + if key == 'ecm_serial': + pktkey = 'serial' + pkt[pktkey] = row[key] + return pkt + + def 
getinsertsql(self, p): + labels = ['time_created', 'ecm_serial'] + values = [str(p['time_created']), "'"+p['serial']+"'"] + for c in PACKET_FORMAT.channels(self.NAME): + labels.append(c) + pktkey = c.replace('_whd', '_dwh') + values.append(str(p[pktkey])) + sql = [] + sql.append('(') + sql.append(','.join(labels)) + sql.append(') VALUES (') + sql.append(','.join(values)) + sql.append(')') + return ''.join(sql), len(values), p['serial'], p['time_created'] + + +# The monitor contains the application control logic, tying together the +# communications mechanism with the data collection and data processing. + +class Monitor(object): + def __init__(self, packet_collector, packet_processors): + self.packet_collector = packet_collector + self.packet_processors = packet_processors + dbgmsg('packet format is %s' % PACKET_FORMAT.__class__.__name__) + dbgmsg('using collector %s' % packet_collector.__class__.__name__) + dbgmsg('using %d processors:' % len(self.packet_processors)) + for p in self.packet_processors: + dbgmsg(' %s' % p.__class__.__name__) + + def read(self): + self.packet_collector.read(PACKET_FORMAT) + + def process(self): + dbgmsg('buffer info:') + for sn in self.packet_collector.packet_buffer.getkeys(): + dbgmsg(' %s: %3d of %3d (%d)' % + (sn, + self.packet_collector.packet_buffer.size(sn), + self.packet_collector.packet_buffer.maxsize, + self.packet_collector.packet_buffer.lastmod(sn))) + for p in self.packet_processors: + try: + dbgmsg('processing with %s' % p.__class__.__name__) + p.process_compiled(self.packet_collector.packet_buffer) + except Exception, e: + if not p.handle(e): + wrnmsg('Exception in %s: %s' % (p.__class__.__name__, e)) + if LOGLEVEL >= LOG_DEBUG: + traceback.print_exc() + + def run(self): + try: + dbgmsg('setup %s' % self.packet_collector.__class__.__name__) + self.packet_collector.setup() + for p in self.packet_processors: + dbgmsg('setup %s' % p.__class__.__name__) + p.setup() + + while True: + self.read() + self.process() + + except KeyboardInterrupt: + sys.exit(0) + except Exception, e: + errmsg(e) + if LOGLEVEL >= LOG_DEBUG: + traceback.print_exc() + sys.exit(1) + + finally: + for p in self.packet_processors: + dbgmsg('cleanup %s' % p.__class__.__name__) + p.cleanup() + dbgmsg('cleanup %s' % self.packet_collector.__class__.__name__) + self.packet_collector.cleanup() + + +# Data Collector classes +# +# all of the collectors are buffered - they contain an array of packets, sorted +# by timestamp and grouped by the serial number of the brultech device. +class BufferedDataCollector(object): + def __init__(self): + self.packet_buffer = CompoundBuffer(BUFFER_SIZE) + + # Read the indicated number of bytes. This should be overridden by derived + # classes to do the actual reading of bytes. + def readbytes(self, count): + return '' + + def setup(self): + pass + + def cleanup(self): + pass + + def open(self): + pass + + def close(self): + pass + + def read(self, packet_format): + self._blockingread(packet_format) + + # The default reader invokes the read method of the packet format. The + # device type tells us how many packets to expect. + def _read(self, packet_format): + maxread = DEVICE_TYPE.numpackets(packet_format.NAME) + packets = [] + nread = 0 + while nread < maxread: + nread += 1 + dbgmsg('reading %d of %d packets' % (nread, maxread)) + packets.extend(packet_format.read(self)) + for p in packets: + p = packet_format.compile(p) + self.packet_buffer.insert(p['time_created'], p) + + # This is a helper method for derived classes that do blocking reads. 
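+    # Note: READ_RETRIES == 0 is treated as "retry forever"; otherwise the
+    # read is abandoned with RetriesExceededError once the retry count is
+    # exhausted, with a RETRY_WAIT pause between attempts.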
+ def _blockingread(self, packet_format): + dbgmsg('waiting for data from device') + havedata = False + nerr = 0 + while (nerr < READ_RETRIES or READ_RETRIES == 0) and not havedata: + try: + self.open() + self._read(packet_format) + havedata = True + except ReadError, e: + dbgmsg('read failed: %s' % e.msg) + except KeyboardInterrupt, e: + raise e + except (EmptyReadError, Exception), e: + nerr += 1 + dbgmsg('failed read %d of %d' % (nerr, READ_RETRIES)) + errmsg(e) + if LOGLEVEL >= LOG_DEBUG: + traceback.print_exc() + self.close() + dbgmsg('waiting %d seconds before retry' % RETRY_WAIT) + time.sleep(RETRY_WAIT) + if not havedata: + raise RetriesExceededError(READ_RETRIES) + + # This is a helper method for derived classes that do polling. for each + # device, make a request for a packet, then do a standard read for the + # response. catch the exceptions so that they do not cause the processing + # to abort prematurely. for each device, permit a limited number of + # failures before bailing out. + def _pollingread(self, packet_format, device_list): + for did in device_list: + havedata = False + ntries = 0 + while ntries < POLL_RETRIES and not havedata: + ntries += 1 + try: + dbgmsg('sending request %d to device %s' % (ntries, did)) + DEVICE_TYPE.requestpacket(self, did) + dbgmsg('waiting for data from device %s' % did) + self._read(packet_format) + havedata = True + except ReadError, e: + dbgmsg('read failed: %s' % e.msg) + except KeyboardInterrupt, e: + raise e + except Exception, e: + dbgmsg('failed request %d of %d for device %s' % (ntries, POLL_RETRIES, did)) + errmsg(e) + if LOGLEVEL >= LOG_DEBUG: + traceback.print_exc() + if not havedata: + wrnmsg('%d requests failed for device %s' % (POLL_RETRIES, did)) + + +class SerialCollector(BufferedDataCollector): + def __init__(self, port, rate): + if not serial: + print 'Serial Error: serial module could not be imported.' + sys.exit(1) + + super(SerialCollector, self).__init__() + self._port = port + self._baudrate = int(rate) + self._conn = None + infmsg('SERIAL: serial port: %s' % self._port) + infmsg('SERIAL: baud rate: %d' % self._baudrate) + + def readbytes(self, count): + return self._conn.read(count) + + def open(self): + if self._conn is not None: + return + dbgmsg('SERIAL: opening connection to %s at %d' % + (self._port, self._baudrate)) + self._conn = serial.Serial(self._port, self._baudrate) + + def close(self): + if self._conn: + dbgmsg('SERIAL: closing connection') + self._conn.close() + self._conn = None + +# the polling collector opens a serial connection, makes a request for data, +# retrieves the data, then closes the connection. typically this is used with +# the brultech device not in real-time mode. it supports multiplexed devices. 
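+#
+# a minimal [source] section for this collector might look like the
+# following sketch (option names and values are illustrative; --help lists
+# the authoritative names):
+#
+#   [source]
+#   serial_read = true
+#   serial_port = /dev/ttyUSB0
+#   serial_baud = 19200
+#   serial_poll_interval = 15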
+class PollingSerialCollector(SerialCollector): + def __init__(self, port, rate, poll_interval): + super(PollingSerialCollector, self).__init__(port, rate) + self._poll_interval = int(poll_interval) + infmsg('SERIAL: poll interval: %d' % self._poll_interval) + + def read(self, packet_format): + dbgmsg('SERIAL: waiting for %d seconds' % self._poll_interval) + time.sleep(self._poll_interval) + try: + self.open() + self._pollingread(packet_format, DEVICE_LIST) + finally: + self.close() + + def send(self, s): + dbgmsg('SERIAL: sending %s' % s) + self._conn.write(s) + + def recv(self, sz=SERIAL_BUFFER_SIZE): + try: + dbgmsg('SERIAL: waiting for %d bytes' % sz) + resp = self._conn.read(sz) + while len(resp) < sz: + dbgmsg('SERIAL: waiting for %d bytes' % (sz - len(resp))) + resp += self._conn.read(sz - len(resp)) + return resp + except Exception, e: + dbgmsg('SERIAL: exception while receiving') + raise e + + +# the blocking collector opens a serial connection then blocks for any data +# sent over the connection. typically this is used with the brultech device +# emitting data in real-time mode. multiplexed devices may suffer from +# collisions. +class BlockingSerialCollector(SerialCollector): + def __init__(self, port, rate): + super(BlockingSerialCollector, self).__init__(port, rate) + + def read(self, packet_format): + self._blockingread(packet_format) + + +class SocketClientCollector(BufferedDataCollector): + def __init__(self, host, port): + if not host: + print 'Socket Error: no host specified' + sys.exit(1) + + super(SocketClientCollector, self).__init__() + socket.setdefaulttimeout(IP_CLIENT_TIMEOUT) + self._host = host + self._port = int(port) + self._sock = None + infmsg('SOCKET: timeout: %d' % IP_CLIENT_TIMEOUT) + infmsg('SOCKET: server host: %s' % self._host) + infmsg('SOCKET: server port: %d' % self._port) + + def readbytes(self, count): + return self._sock.recv(count) + + def open(self): + if self._sock is not None: + return + dbgmsg('opening socket connection to %s:%d' % (self._host, self._port)) + self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._sock.connect((self._host, self._port)) + + def close(self): + if self._sock: + dbgmsg('closing socket connection') + self._sock.close() + self._sock = None + + +# the polling collector opens a socket, makes a request for data, retrieves the +# data, then closes the socket. typically this is used with a wiznet in server +# mode and the brultech device not in real-time mode. it supports multiplexed +# devices. 
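+#
+# each poll sends the request built by requestpacket, for example
+# '^^^APISPK' for a single device or '^^^NMB2APISPK' to address multiplexed
+# device 2, then reads the binary response packet(s).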
+class PollingSocketClientCollector(SocketClientCollector): + def __init__(self, host, port, poll_interval): + super(PollingSocketClientCollector, self).__init__(host, port) + self._poll_interval = int(poll_interval) + infmsg('SOCKET: poll interval: %d' % self._poll_interval) + + def read(self, packet_format): + dbgmsg('SOCKET: waiting for %d seconds' % self._poll_interval) + time.sleep(self._poll_interval) + try: + self.open() + self._pollingread(packet_format, DEVICE_LIST) + except socket.timeout: + dbgmsg('SOCKET: timeout while connecting') + except socket.error, e: + if e.errno == errno.EHOSTUNREACH: + dbgmsg('SOCKET: host unreachable') + finally: + self.close() + + def send(self, s): + dbgmsg('SOCKET: sending %s' % s) + self._sock.sendall(s) + + def recv(self, sz=IP_BUFFER_SIZE): + resp = '' + try: + dbgmsg('waiting for %d bytes' % sz) + resp = self._sock.recv(sz) + while len(resp) < sz: + dbgmsg('SOCKET: waiting for %d bytes' % (sz - len(resp))) + resp += self._sock.recv(sz - len(resp)) + except socket.timeout, e: + dbgmsg('SOCKET: timeout while receiving') + pass + except Exception, e: + dbgmsg('SOCKET: exception while receiving') + raise e + return resp + + +# the blocking collector opens a socket then blocks for any data sent over the +# socket. typically this is used with a wiznet in server mode and the brultech +# device emitting data in real-time mode. multiplexed devices may suffer from +# collisions. +class BlockingSocketClientCollector(SocketClientCollector): + def __init__(self, host, port): + super(BlockingSocketClientCollector, self).__init__(host, port) + + def read(self, packet_format): + self._blockingread(packet_format) + + +# the server collector opens a socket then blocks, waiting for connections from +# clients. typically this is used by setting a wiznet to be in client mode +# with the device emitting data in real-time mode. +class SocketServerCollector(BufferedDataCollector): + def __init__(self, host, port): + super(SocketServerCollector, self).__init__() + self._host = host + self._port = int(port) + self._sock = None + self._conn = None + infmsg('SOCKET: bind host: %s' % self._host) + infmsg('SOCKET: bind port: %d' % self._port) + + def readbytes(self, count): + data = self._conn.recv(count) + dbgmsg('SOCKET: read %d of %d bytes from socket: %s' % + (len(data), count, ' '.join(['%02x' % ord(x) for x in data]))) + return data + + def read(self, packet_format): + try: + dbgmsg('SOCKET: waiting for connection') + self._conn, addr = self._sock.accept() + self._blockingread(packet_format) + finally: + if self._conn: + dbgmsg('SOCKET: closing connection') + self._conn.shutdown(socket.SHUT_RD) + self._conn.close() + self._conn = None + + def setup(self): + dbgmsg('SOCKET: binding to %s:%d' % (self._host, self._port)) + self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + try: + self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + except Exception: # REUSEPORT may not be supported on all systems + pass + self._sock.bind((self._host, self._port)) + self._sock.listen(1) + + def cleanup(self): + if self._sock: + dbgmsg('SOCKET: closing socket') + self._sock.close() + self._sock = None + + +# the database collector periodically makes a connection to the database and +# gets all records since the last query. 
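+#
+# each poll issues a query of the form (placeholders, not literal sql):
+#
+#   select * from <table> where time_created > <lastread>
+#
+# where <lastread> is the largest time_created seen so far.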
+class DatabaseCollector(BufferedDataCollector): + def __init__(self, table, poll_interval): + super(DatabaseCollector, self).__init__() + self._conn = None + self._table = table + self._poll_interval = int(poll_interval) + self._lastread = 0 + infmsg('DB: polling interval: %d seconds' % self._poll_interval) + infmsg('DB: table: %s' % self._table) + + def _getdictcursor(self): + return self._conn.cursor() + + def setup(self): + cursor = self._conn.cursor() + cursor.execute('select max(time_created) from ' + self._table) + row = cursor.fetchone() + if row: + dbgmsg('DB: latest record has time_created of %s' % row[0]) + self._lastread = int(row[0]) + cursor.close() + + def cleanup(self): + if self._conn: + dbgmsg('DB: closing database connection') + self._conn.close() + self._conn = None + + def read(self, packet_format): + dbgmsg('DB: waiting %d seconds' % self._poll_interval) + time.sleep(self._poll_interval) + packets = [] + cursor = self._getdictcursor() + # FIXME: order by time_created just to be safe? + cursor.execute('select * from ' + self._table + ' where time_created > ' + str(self._lastread)) + while True: + row = cursor.fetchone() + if row == None: + break + packets.append(SCHEMA.db2pkt(row)) + self._lastread = max(self._lastread, row['time_created']) + cursor.close() + for p in packets: + self.packet_buffer.insert(p['time_created'], p) + + +class MySQLCollector(DatabaseCollector): + def __init__(self, host, user, password, database, table, poll_interval): + if not MySQLdb: + print 'MySQL Error: MySQLdb module could not be imported.' + sys.exit(1) + + super(MySQLCollector, self).__init__(database+'.'+table, poll_interval) + self._host = host + self._user = user + self._passwd = password + self._database = database + infmsg('MYSQL: host: %s' % self._host) + infmsg('MYSQL: username: %s' % self._user) + infmsg('MYSQL: database: %s' % self._database) + + def _getdictcursor(self): + return self._conn.cursor(MySQLdb.cursors.DictCursor) + + def setup(self): + self._conn = MySQLdb.connect(host=self._host, + user=self._user, + passwd=self._passwd, + db=self._database) + super(MySQLCollector, self).setup() + + def handle(self, e): + if type(e) == MySQLdb.Error: + errmsg('MySQL Error: [#%d] %s' % (e.args[0], e.args[1])) + return True + return super(MySQLCollector, self).handle(e) + + +class SqliteCollector(DatabaseCollector): + def __init__(self, filename, table, poll_interval): + if not sqlite: + print 'Sqlite Error: sqlite3 module could not be imported.' + sys.exit(1) + if not filename: + print 'Sqlite Error: no database file specified' + sys.exit(1) + + super(SqliteCollector, self).__init__(table, poll_interval) + self._file = filename + infmsg('SQLITE: file: %s' % self._file) + + def setup(self): + self._conn = sqlite.connect(self._file) + self._conn.row_factory = sqlite.Row # return dict objects for rows + super(SqliteCollector, self).setup() + + +class RRDCollector(BufferedDataCollector): + def __init__(self, path, step, poll_interval): + if not rrdtool: + print 'RRD Error: rrdtool module could not be imported.' 
+            sys.exit(1)
+
+        super(RRDCollector, self).__init__()
+        self._poll_interval = int(poll_interval)
+        self._lastread = 0
+        self._dir = path
+        self._step = step
+        infmsg('RRD: polling interval: %d seconds' % self._poll_interval)
+        infmsg('RRD: dir: %s' % self._dir)
+        infmsg('RRD: step: %s' % self._step)
+
+    def setup(self):
+        self._lastread = 0
+
+    # fetch returns 3 arrays:
+    #   begin, end, interval
+    #   labels
+    #   list of tuples
+    def read(self, packet_format):
+        dbgmsg('DB: waiting %d seconds' % self._poll_interval)
+        time.sleep(self._poll_interval)
+        packets = []
+        now = getgmtime()
+        e = int(now / self._step) * self._step
+        s = e - 10 * self._step
+        # FIXME: no serial discovery yet, so fall back to any serials
+        # already seen in the buffer
+        for sn in self.packet_buffer.getkeys():
+            pkts = self.read_files(sn, s, e)
+            packets.extend(pkts)
+        for p in packets:
+            self.packet_buffer.insert(p['time_created'], p)
+
+    def read_files(self, sn, s, e):
+        packets = []
+        for x in PACKET_FORMAT.channels(FILTER_DB_SCHEMA_COUNTERS):
+            data = self.read_rrd(sn, x, s, e)
+            ts = [i for i in range(s, e, self._step)]
+        return packets  # FIXME: not implemented properly
+
+    def read_rrd(self, sn, channel, s, e):
+        fn = mkfn(self._dir, mklabel(sn, channel))
+        return rrdtool.fetch(fn, 'AVERAGE', '--start', str(s), '--end', str(e))
+
+
+# Buffer Classes
+
+class MovingBuffer(object):
+    """Maintain fixed-size buffer of data. Oldest packets are removed."""
+    def __init__(self, maxsize):
+        self.maxsize = maxsize
+        self.packets = []
+
+    def insert(self, timestamp, packet):
+        dbgmsg('buffering packet ts:%d sn:%s' % (timestamp, packet['serial']))
+        bisect.insort(self.packets, (timestamp, packet))
+        if len(self.packets) > self.maxsize:
+            del(self.packets[0])
+
+    def newest(self, timestamp):
+        """return all packets with timestamp newer than specified timestamp"""
+        idx = bisect.bisect(self.packets, (timestamp, {}))
+        return self.packets[idx:]
+
+    def oldest(self):
+        """return the oldest packet in the buffer"""
+        return self.packets[0]
+
+    def size(self):
+        return len(self.packets)
+
+    def lastmod(self):
+        if len(self.packets) > 0:
+            return self.packets[len(self.packets) - 1][0]
+        return 0
+
+class CompoundBuffer(object):
+    """Variable number of moving buffers, each associated with an ID"""
+    def __init__(self, maxsize):
+        self.maxsize = maxsize
+        self.buffers = dict()
+        dbgmsg('buffer size: %d' % self.maxsize)
+
+    def insert(self, timestamp, packet):
+        return self.getbuffer(packet['serial']).insert(timestamp, packet)
+
+    def newest(self, ecm_serial, timestamp):
+        return self.getbuffer(ecm_serial).newest(timestamp)
+
+    def oldest(self, ecm_serial):
+        return self.getbuffer(ecm_serial).oldest()
+
+    def size(self, ecm_serial):
+        return self.getbuffer(ecm_serial).size()
+
+    def lastmod(self, ecm_serial):
+        return self.getbuffer(ecm_serial).lastmod()
+
+    def getbuffer(self, ecm_serial):
+        if not ecm_serial in self.buffers:
+            dbgmsg('adding buffer for %s' % ecm_serial)
+            self.buffers[ecm_serial] = MovingBuffer(self.maxsize)
+        return self.buffers[ecm_serial]
+
+    def getkeys(self):
+        return self.buffers.keys()
+
+
+# Packet Processor Classes
+
+class BaseProcessor(object):
+    def __init__(self):
+        self.last_processed = dict()
+        self.process_period = 1  # in seconds
+
+    def setup(self):
+        pass
+
+    def process_compiled(self, packet_buffer):
+        now = getgmtime()
+        for sn in packet_buffer.getkeys():
+            if packet_buffer.size(sn) < 1:
+                dbgmsg('buffer is empty for %s' % sn)
+                continue
+            if not sn in self.last_processed or now >= self.last_processed[sn] + self.process_period:
+                if not sn in self.last_processed:
+                    ts = packet_buffer.oldest(sn)[0]
+                else:
+                    ts = self.last_processed[sn]
+                data
= packet_buffer.newest(sn, ts) + if len(data) > 1: + dbgmsg('%d buffered packets sn:%s' % (len(data), sn)) + packets = [] + for a,b in zip(data[0:], data[1:]): + try: + self.last_processed[sn] = b[0] + packets.append(PACKET_FORMAT.calculate(b[1], a[1])) + except ZeroDivisionError: + infmsg("not enough data in buffer for %s" % sn) + except CounterResetError, cre: + wrnmsg("counter reset for %s: %s" % (sn, cre.msg)) + dbgmsg('%d calculated packets sn:%s' % (len(packets), sn)) + self.process_calculated(packets) + else: + dbgmsg('not enough data for %s' % sn) + continue + else: + x = self.last_processed[sn] + self.process_period - now + dbgmsg('waiting %d seconds to process packets for %s' % (x, sn)) + + def process_calculated(self, packets): + pass + + def handle(self, exception): + return False + + def cleanup(self): + pass + + +class PrintProcessor(BaseProcessor): + def __init__(self): + super(PrintProcessor, self).__init__() + + def process_calculated(self, packets): + for p in packets: + print + PACKET_FORMAT.printPacket(p) + + +class DatabaseProcessor(BaseProcessor): + def __init__(self, table, period): + super(DatabaseProcessor, self).__init__() + self.db_table = table + self.process_period = int(period) + self.conn = None + + def process_calculated(self, packets): + for p in packets: + valsql, nval, sn, ts = SCHEMA.getinsertsql(p) + sql = [] + sql.append('INSERT INTO %s ' % self.db_table) + sql.append(valsql) + dbgmsg('DB: query: %s' % ''.join(sql)) + cursor = self.conn.cursor() + cursor.execute(''.join(sql)) + cursor.close() + infmsg('DB: inserted %d values for %s at %s' % (nval, sn, ts)) + self.conn.commit() + + +class MySQLClient(object): + def __init__(self, host, user, passwd, database, table): + if not MySQLdb: + print 'MySQL Error: MySQLdb module could not be imported.' + sys.exit(1) + + self.conn = None + self.db_host = host + self.db_user = user + self.db_passwd = passwd + self.db_database = database + self.db_table = self.db_database + '.' 
+ table + + infmsg('MYSQL: host: %s' % self.db_host) + infmsg('MYSQL: username: %s' % self.db_user) + infmsg('MYSQL: database: %s' % self.db_database) + infmsg('MYSQL: table: %s' % self.db_table) + + def _open_connection(self): + dbgmsg('MYSQL: opening connection to %s' % self.db_host) + self.conn = MySQLdb.connect(host=self.db_host, + user=self.db_user, + passwd=self.db_passwd, + db=self.db_database) + + def _close_connection(self): + if self.conn: + dbgmsg('MYSQL: closing database connection') + self.conn.close() + self.conn = None + + def setup(self): + self._open_connection() + + def cleanup(self): + self._close_connection() + +class MySQLProcessor(DatabaseProcessor, MySQLClient): + def __init__(self, host, user, passwd, database, table, period, + persistent_connection=False): + DatabaseProcessor.__init__(self, table, period) + MySQLClient.__init__(self, host, user, passwd, database, table) + self._tbl = table + self._persistent_connection = persistent_connection + infmsg('MYSQL: process_period: %d' % self.process_period) + + def setup(self): + cfg = MySQLConfigurator(self.db_host, self.db_user, self.db_passwd, self.db_database, self._tbl) + cfg.configure() + if self._persistent_connection: + MySQLClient.setup(self) + + def cleanup(self): + if self._persistent_connection: + MySQLClient.cleanup(self) + + def process_calculated(self, packets): + try: + if not self._persistent_connection: + MySQLClient.setup(self) + DatabaseProcessor.process_calculated(self, packets) + finally: + if not self._persistent_connection: + MySQLClient.cleanup(self) + + def handle(self, e): + if type(e) == MySQLdb.Error: + errmsg('MySQL Error: [#%d] %s' % (e.args[0], e.args[1])) + return True + return super(MySQLProcessor, self).handle(e) + + +class MySQLConfigurator(MySQLClient): + def __init__(self, host, user, passwd, database, table): + MySQLClient.__init__(self, host, user, passwd, database, table) + + def setup(self): + dbgmsg('MYSQL: opening connection to %s' % self.db_host) + self.conn = MySQLdb.connect(host=self.db_host, + user=self.db_user, + passwd=self.db_passwd) + + def configure(self): + try: + self.setup() + + cursor = self.conn.cursor() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + infmsg('MYSQL: creating database %s' % self.db_database) + cursor.execute('create database if not exists %s' % + self.db_database) + infmsg('MYSQL: creating table %s' % self.db_table) + cursor.execute('create table if not exists %s %s' % + (self.db_table, + SCHEMA.gettablesql('auto_increment'))) + cursor.close() + + self.conn.commit() + + finally: + self.cleanup() + + +class SqliteClient(object): + def __init__(self, filename, table): + if not sqlite: + print 'Sqlite Error: sqlite3 module could not be imported.' 
+ sys.exit(1) + if not filename: + print 'Sqlite Error: no database file specified' + sys.exit(1) + + self.db_filename = filename + self.db_table = table + self.conn = None + infmsg('SQLITE: file: %s' % self.db_filename) + infmsg('SQLITE: table: %s' % self.db_table) + + def setup(self): + self.conn = sqlite.connect(self.db_filename) + + def cleanup(self): + if self.conn: + dbgmsg('SQLITE: closing database connection') + self.conn.close() + self.conn = None + + +class SqliteProcessor(DatabaseProcessor, SqliteClient): + def __init__(self, filename, table, period): + DatabaseProcessor.__init__(self, table, period) + SqliteClient.__init__(self, filename, table) + infmsg('SQLITE: process_period: %d' % self.process_period) + + def setup(self): + cfg = SqliteConfigurator(self.db_filename, self.db_table) + cfg.configure() + SqliteClient.setup(self) + + def cleanup(self): + SqliteClient.cleanup(self) + + +class SqliteConfigurator(SqliteClient): + def __init__(self, filename, table): + SqliteClient.__init__(self, filename, table) + + def configure(self): + try: + self.setup() + + cursor = self.conn.cursor() + cursor.execute("select name from sqlite_master where type='table' and name='%s'" % self.db_table) + row = cursor.fetchone() + cursor.close() + + if row is not None: + return + + infmsg('SQLITE: creating table %s' % self.db_table) + cursor = self.conn.cursor() + cursor.execute('create table %s %s' % (self.db_table, SCHEMA.gettablesql())) + cursor.close() + + self.conn.commit() + + finally: + self.cleanup() + + +class RRDProcessor(BaseProcessor): + def __init__(self, path, step, heartbeat, period): + if not rrdtool: + print 'RRD Error: rrdtool module could not be imported.' + sys.exit(1) + + super(RRDProcessor, self).__init__() + self.process_period = int(period) + self._dir = path + self._step = step + self._heartbeat = heartbeat + infmsg('RRD: update period: %d' % self.process_period) + infmsg('RRD: dir: %s' % self._dir) + infmsg('RRD: step: %s' % self._step) + infmsg('RRD: heartbeat: %s' % self._heartbeat) + + def _mkdir(self): + if not os.path.exists(self._dir): + infmsg('RRD: creating rrd directory %s' % self._dir) + os.makedirs(self._dir) + + def _rrdexists(self, packet, channel): + fn = mkfn(self._dir, mklabel(packet['serial'], channel)) + return os.path.exists(fn) + + # one data source per file with average, min, and max + # dstype is one of COUNTER, GAUGE, DERIVE, ABSOLUTE, COMPUTE + def _create_rrd(self, packet, channel, dstype): + self._mkdir() + ts = packet['time_created'] - 1 + label = mklabel(packet['serial'], channel) + fn = mkfn(self._dir, label) + infmsg('RRD: creating rrd file %s' % fn) + rc = rrdtool.create( + fn, + '--step', str(self._step), + '--start', str(ts), + ["DS:%s:%s:%d:U:U" % (channel, dstype, self._heartbeat)], + "RRA:AVERAGE:0.5:%d:%d" % (RRD_STEPS[0], RRD_RESOLUTIONS[0]), + "RRA:AVERAGE:0.5:%d:%d" % (RRD_STEPS[1], RRD_RESOLUTIONS[1]), + "RRA:AVERAGE:0.5:%d:%d" % (RRD_STEPS[2], RRD_RESOLUTIONS[2]), + "RRA:AVERAGE:0.5:%d:%d" % (RRD_STEPS[3], RRD_RESOLUTIONS[3]), + "RRA:MAX:0.5:%d:%d" % (RRD_STEPS[0], RRD_RESOLUTIONS[0]), + "RRA:MAX:0.5:%d:%d" % (RRD_STEPS[1], RRD_RESOLUTIONS[1]), + "RRA:MAX:0.5:%d:%d" % (RRD_STEPS[2], RRD_RESOLUTIONS[2]), + "RRA:MAX:0.5:%d:%d" % (RRD_STEPS[3], RRD_RESOLUTIONS[3]), + "RRA:MIN:0.5:%d:%d" % (RRD_STEPS[0], RRD_RESOLUTIONS[0]), + "RRA:MIN:0.5:%d:%d" % (RRD_STEPS[1], RRD_RESOLUTIONS[1]), + "RRA:MIN:0.5:%d:%d" % (RRD_STEPS[2], RRD_RESOLUTIONS[2]), + "RRA:MIN:0.5:%d:%d" % (RRD_STEPS[3], RRD_RESOLUTIONS[3])) + if rc: + wrnmsg("RRD: failed to 
create '%s': %d" % (fn, rc)) + + def _update_rrd(self, label, values): + fn = mkfn(self._dir, label) + infmsg('RRD: updating %s with %d values' % (fn, len(values))) + rc = rrdtool.update(fn, values) + if rc: + wrnmsg("RRD: failed to update '%s': %d" % (fn, rc)) + + def _getvalues(self, ts, value, dstype): + if dstype == 'GAUGE': + s = '%d:%f' % (ts, value) + else: + s = '%d:%d' % (ts, value) + return s + + def _update_files(self, packets): + values = dict() + for p in packets: + for x in PACKET_FORMAT.channels(FILTER_DB_SCHEMA_COUNTERS): + label = mklabel(p['serial'], x) + t = self._gettype(x) + if not self._rrdexists(p, x): + self._create_rrd(p, x, t) + if not label in values: + values[label] = [] + values[label].append(self._getvalues(p['time_created'], p[x], t)) + for label in values.keys(): + if len(values[label]) > 0: + self._update_rrd(label, values[label]) + + def _gettype(self, channel): + if channel.endswith('_aws') or channel.endswith('_ws'): + return 'DERIVE' + if channel[0] == 'p' or channel.endswith('_pws'): + return 'COUNTER' + if channel.endswith('volts') or channel[0] == 't' or channel.endswith('_a') or channel.endswith('_amps') or channel.endswith('_w') or channel.endswith('_wh') or channel.endswith('_pwh') or channel.endswith('_nwh') or channel.endswith('_pw') or channel.endswith('_nw') or channel.endswith('_whd'): + return 'GAUGE' + return 'DERIVE' + + def process_calculated(self, packets): + self._process_all(packets) + + def _process_latest(self, packets): + # process latest packet - assumes rrd update period is same as step + if len(packets) > 0: + self._update_files([packets[len(packets) - 1]]) + + def _process_all(self, packets): + # process each packet - assumes device emits at same frequency as step + self._update_files(packets) + + +class UploadProcessor(BaseProcessor): + class FakeResult(object): + def geturl(self): + return 'fake result url' + def info(self): + return 'fake result info' + def read(self): + return 'fake result read' + + def __init__(self): + super(UploadProcessor, self).__init__() + self.process_period = DEFAULT_UPLOAD_PERIOD + self.timeout = DEFAULT_UPLOAD_TIMEOUT + self.urlopener = None + pass + + def setup(self): + pass + + def process_calculated(self, packets): + pass + + def handle(self, exception): + return False + + def cleanup(self): + pass + + def _create_request(self, url): + req = urllib2.Request(url) + req.add_header("User-Agent", "%s/%s" % (__app__, __version__)) + return req + + def _urlopen(self, url, data): + result = None + try: + req = self._create_request(url) + dbgmsg('%s: url: %s\n headers: %s\n data: %s' % + (self.__class__.__name__, req.get_full_url(), req.headers, data)) + if SKIP_UPLOAD: + result = UploadProcessor.FakeResult() + elif self.urlopener: + result = self.urlopener.open(req, data, self.timeout) + else: + result = urllib2.urlopen(req, data, self.timeout) + infmsg('%s: %dB url, %dB payload' % + (self.__class__.__name__, len(url), len(data))) + dbgmsg('%s: url: %s\n response: %s' % + (self.__class__.__name__, result.geturl(), result.info())) + except urllib2.HTTPError, e: + self._handle_urlopen_error(e, url, data) +# errmsg('%s Error: %s' % (self.__class__.__name__, e.read())) + except Exception, e: + errmsg('%s Error: %s' % (self.__class__.__name__, e)) + return result + + def _handle_urlopen_error(self, e, url, data): + errmsg(''.join(['%s Error: %s' % (self.__class__.__name__, e), + '\n URL: ' + url, + '\n data: ' + data])) + + +class WattzOnProcessor(UploadProcessor): + def __init__(self, api_key, user, 
passwd, map_str, period, timeout):
+        super(WattzOnProcessor, self).__init__()
+        self.api_key = api_key
+        self.username = user
+        self.password = passwd
+        self.map_str = map_str
+        self.process_period = int(period)
+        self.timeout = int(timeout)
+        self.map = dict()
+
+        infmsg('WO: upload period: %d' % self.process_period)
+        infmsg('WO: url: %s' % WATTZON_API_URL)
+        infmsg('WO: api key: %s' % self.api_key)
+        infmsg('WO: username: %s' % self.username)
+        infmsg('WO: map: %s' % self.map_str)
+
+    def setup(self):
+        if not (self.api_key and self.username and self.password and self.map_str):
+            print 'WattzOn Error: Insufficient parameters'
+            if not self.api_key:
+                print '  No API key'
+            if not self.username:
+                print '  No username'
+            if not self.password:
+                print '  No password'
+            if not self.map_str:
+                print '  No mapping between channels and WattzOn meters'
+            sys.exit(1)
+        self.map = pairs2dict(self.map_str)
+        if not self.map:
+            print 'WattzOn Error: cannot determine channel-meter map'
+            sys.exit(1)
+        p = urllib2.HTTPPasswordMgrWithDefaultRealm()
+        p.add_password('WattzOn', WATTZON_API_URL, self.username, self.password)
+        auth = urllib2.HTTPBasicAuthHandler(p)
+        self.urlopener = urllib2.build_opener(auth)
+
+    def process_calculated(self, packets):
+        for p in packets:
+            for c in PACKET_FORMAT.channels(FILTER_PE_LABELS):
+                key = mklabel(p['serial'], c)
+                if key in self.map:
+                    meter = self.map[key]
+                    ts = mkts(p['time_created'])
+                    result = self._make_call(meter, ts, p[c+'_w'])
+                    infmsg('WattzOn: %d [%s] magnitude: %.2f' %
+                           (ts, meter, p[c+'_w']))
+                    dbgmsg('WattzOn: %s' % result.info())
+
+    def handle(self, e):
+        if type(e) == urllib2.HTTPError:
+            errmsg(''.join(['HTTPError: ' + str(e),
+                            '\n URL: ' + e.geturl(),
+                            '\n username: ' + self.username,
+                            '\n password: ' + self.password,
+                            '\n API key: ' + self.api_key]))
+            return True
+        return super(WattzOnProcessor, self).handle(e)
+
+    def _make_call(self, meter, timestamp, magnitude):
+        data = {'updates': [{
+            'timestamp': timestamp,
+            'power': {
+                'magnitude': int(magnitude),  # truncated by WattzOn API
+                'unit': 'W'}}]
+        }
+        url = '%s/user/%s/powermeter/%s/upload.json?key=%s' % (
+            WATTZON_API_URL,
+            self.username,
+            urllib.quote(meter),
+            self.api_key)
+        req = self._create_request(url)
+        return self.urlopener.open(req, json.dumps(data), self.timeout)
+
+    def _create_request(self, url):
+        req = super(WattzOnProcessor, self)._create_request(url)
+        req.add_header("Content-Type", "application/json")
+        return req
+
+
+class PlotWattProcessor(UploadProcessor):
+    def __init__(self, api_key, house_id, map_str, period, timeout):
+        super(PlotWattProcessor, self).__init__()
+        self.api_key = api_key
+        self.house_id = house_id
+        self.map_str = map_str
+        self.url = PLOTWATT_BASE_URL + PLOTWATT_UPLOAD_URL
+        self.process_period = int(period)
+        self.timeout = int(timeout)
+        self.map = dict()
+
+        infmsg('PW: upload period: %d' % self.process_period)
+        infmsg('PW: url: %s' % self.url)
+        infmsg('PW: api key: %s' % self.api_key)
+        infmsg('PW: house id: %s' % self.house_id)
+        infmsg('PW: map: %s' % self.map_str)
+
+    def setup(self):
+        if not (self.api_key and self.house_id and self.map_str):
+            print 'PlotWatt Error: Insufficient parameters'
+            if not self.api_key:
+                print '  No API key'
+            if not self.house_id:
+                print '  No house ID'
+            if not self.map_str:
+                print '  No mapping between channels and PlotWatt meters'
+            sys.exit(1)
+        self.map = pairs2dict(self.map_str)
+        if not self.map:
+            print 'PlotWatt Error: cannot determine channel-meter map'
+            sys.exit(1)
+
+    def
process_calculated(self, packets): + s = [] + for p in packets: + for c in PACKET_FORMAT.channels(FILTER_PE_LABELS): + key = mklabel(p['serial'], c) + if key in self.map: + meter = self.map[key] + # format for each meter is: meter-id,kW,gmt-timestamp + s.append("%s,%.5f,%d" % + (meter, p[c+'_w']/1000, p['time_created'])) + if len(s): + self._urlopen(self.url, ','.join(s)) + # FIXME: check for server response + + def _handle_urlopen_error(self, e, url, payload): + errmsg(''.join(['%s Error: %s' % (self.__class__.__name__, e), + '\n URL: ' + url, + '\n API key: ' + self.api_key, + '\n house ID: ' + self.house_id, + '\n data: ' + payload])) + + def _create_request(self, url): + req = super(PlotWattProcessor, self)._create_request(url) + req.add_header("Content-Type", "text/xml") + b64s = base64.encodestring('%s:%s' % (self.api_key, ''))[:-1] + req.add_header("Authorization", "Basic %s" % b64s) + return req + + +# format for the enersave uploads is based on the pdf document called +# 'myenersave upload api v0.8' from jan2012. +# +# the energy measurements must be sorted by timestamp from oldest to newest, +# and the value of the energy reading is a cumulative measurement of energy. +class EnerSaveProcessor(UploadProcessor): + def __init__(self, url, token, map_str, period, timeout): + super(EnerSaveProcessor, self).__init__() + self.url = url + self.token = token + self.map_str = map_str + self.process_period = int(period) + self.timeout = int(timeout) + self.map = dict() + + infmsg('ES: upload period: %d' % self.process_period) + infmsg('ES: url: %s' % self.url) + infmsg('ES: token: %s' % self.token) + infmsg('ES: map: %s' % self.map_str) + + def setup(self): + if not (self.url and self.token): + print 'EnerSave Error: Insufficient parameters' + if not self.url: + print ' No URL' + if not self.token: + print ' No token' + sys.exit(1) + self.map = self.tuples2dict(self.map_str) + + @staticmethod + def tuples2dict(s): + items = s.split(',') + m = dict() + for k, d, t in zip(items[::3], items[1::3], items[2::3]): + m[k] = {'desc': d, 'type': t} + return m + + def process_calculated(self, packets): + sensors = dict() + readings = dict() + for p in packets: + if self.map: + for c in PACKET_FORMAT.channels(FILTER_PE_LABELS): + key = mklabel(p['serial'], c) + if key in self.map: + tpl = self.map[key] + dev_id = mklabel(obfuscate_serial(p['serial']), c) + dev_type = tpl['type'] or ES_DEFAULT_TYPE + dev_desc = tpl['desc'] or c + sensors[dev_id] = {'type': dev_type, 'desc': dev_desc} + if not dev_id in readings: + readings[dev_id] = [] + readings[dev_id].append( + '' % + (p['time_created'], p[c+'_wh'])) + else: + for c in PACKET_FORMAT.channels(FILTER_PE_LABELS): + dev_id = mklabel(obfuscate_serial(p['serial']), c) + dev_type = ES_DEFAULT_TYPE + dev_desc = c + sensors[dev_id] = {'type': dev_type, 'desc': dev_desc} + if not dev_id in readings: + readings[dev_id] = [] + readings[dev_id].append('' % + (p['time_created'], p[c+'_wh'])) + s = [] + for key in sensors: + s.append('' % + (key, sensors[key]['type'], sensors[key]['desc'])) + s.append(''.join(readings[key])) + s.append('') + if len(s): + s.insert(0, '') + s.insert(1, '') + s.append('') + self._urlopen(self.url, ''.join(s)) + # FIXME: check for server response + + def _handle_urlopen_error(self, e, url, payload): + errmsg(''.join(['%s Error: %s' % (self.__class__.__name__, e), + '\n URL: ' + url, + '\n token: ' + self.token, + '\n data: ' + payload])) + + def _create_request(self, url): + req = super(EnerSaveProcessor, self)._create_request(url) + 
req.add_header("Content-Type", "application/xml") + req.add_header("Token", self.token) + return req + + +# format for the Bidgely uploads is based on the pdf document called +# 'Bidgely Developer API v1.0.1' from 5/27/13. +# +# the energy measurements must be sorted by timestamp from oldest to newest, +# and the value of the energy reading is a cumulative measurement of energy. +class BidgelyProcessor(UploadProcessor): + def __init__(self, url, map_str, period, timeout): + super(BidgelyProcessor, self).__init__() + self.url = url + self.map_str = map_str + self.process_period = int(period) + self.timeout = int(timeout) + self.map = dict() + + infmsg('BY: upload period: %d' % self.process_period) + infmsg('BY: url: %s' % self.url) + infmsg('BY: map: %s' % self.map_str) + + def setup(self): + if not (self.url and self.map_str): + print 'Bidgely Error: Insufficient parameters' + if not self.url: + print ' No URL' + if not self.map_str: + print ' No Map' + sys.exit(1) + self.map = self.tuples2dict(self.map_str) + + @staticmethod + def tuples2dict(s): + items = s.split(',') + m = dict() + for k, d, t in zip(items[::3], items[1::3], items[2::3]): + m[k] = {'desc': d, 'type': t} + return m + + def process_calculated(self, packets): + sensors = dict() + wh_readings = dict() + w_readings = dict() + for p in packets: + for c in PACKET_FORMAT.channels(FILTER_PE_LABELS): + key = mklabel(p['serial'], c) + if key in self.map: + tpl = self.map[key] + dev_id = mklabel(obfuscate_serial(p['serial']), c) + dev_type = tpl['type'] or BY_DEFAULT_TYPE + dev_desc = tpl['desc'] or dev_id + sensors[dev_id] = {'type': dev_type, 'desc': dev_desc} + if not dev_id in wh_readings: + wh_readings[dev_id] = [] + wh_readings[dev_id].append('' % + (p['time_created'], p[c+'_wh'])) + if not dev_id in w_readings: + w_readings[dev_id] = [] + w_readings[dev_id].append('' % + (p['time_created'], p[c+'_w'])) + s = [] + for key in sensors: + # FIXME different ID for generation + s.append('' % + (key, sensors[key]['type'], sensors[key]['desc'])) + s.append('') + s.append('' % + (sensors[key]['desc'])) + s.append(''.join(wh_readings[key])) + s.append('') + s.append('' % + (sensors[key]['desc'])) + s.append(''.join(w_readings[key])) + s.append('') + s.append('') + s.append('') + if len(s): + s.insert(0, '') + s.insert(1, '') + s.insert(2, '') + s.append('') + s.append('') + self._urlopen(self.url, ''.join(s)) + # FIXME: check for server response + + def _handle_urlopen_error(self, e, url, payload): + errmsg(''.join(['%s Error: %s' % (self.__class__.__name__, e), + '\n URL: ' + url, + '\n data: ' + payload])) + + def _create_request(self, url): + req = super(BidgelyProcessor, self)._create_request(url) + req.add_header("Content-Type", "application/xml") + return req + + +# format for the peoplepower data upload is based on the DeviceAPI web pages +# at developer.peoplepowerco.com/docs/?q=node/37 from dec2011-jan2012. +# +# the peoplepower documentation does not indicate whether energy readings +# are cumulative or delta. this implementation uses cumulative. 
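+#
+# uploads are posted as xml with a 'PPCAuthorization' header; each post is
+# wrapped in a hub envelope that carries an incrementing nonce (see the
+# _urlopen method below).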
+class PeoplePowerProcessor(UploadProcessor): + def __init__(self, url, token, hub_id, map_str, period, timeout, add_devices): + super(PeoplePowerProcessor, self).__init__() + self.url = url + self.token = token + self.hub_id = hub_id + self.map_str = map_str + self.nonce = PPCO_FIRST_NONCE + self.dev_type = PPCO_DEVICE_TYPE + self.process_period = int(period) + self.timeout = int(timeout) + self.do_add_devices = PPCO_ADD_DEVICES if add_devices is None else add_devices + self.map = dict() + + infmsg('PP: upload period: %d' % self.process_period) + infmsg('PP: url: %s' % self.url) + infmsg('PP: token: %s' % self.token) + infmsg('PP: hub id: %s' % self.hub_id) + infmsg('PP: map: %s' % self.map_str) + infmsg('PP: add devices: %s' % self.do_add_devices) + + def setup(self): + if not (self.url and self.token and self.hub_id and self.map_str): + print 'PeoplePower Error: Insufficient parameters' + if not self.url: + print ' No URL' + if not self.token: + print ' No token' + if not self.hub_id: + print ' No hub ID' + if not self.map_str: + print ' No mapping between channels and PeoplePower devices' + sys.exit(1) + self.map = pairs2dict(self.map_str) + if not self.map: + print 'PeoplePower Error: cannot determine channel-meter map' + sys.exit(1) + if self.do_add_devices: + self.add_devices() + + def process_calculated(self, packets): + s = [] + for p in packets: + for c in PACKET_FORMAT.channels(FILTER_PE_LABELS): + key = mklabel(p['serial'], c) + if key in self.map: + ts = mkts(p['time_created']) + s.append('' % (self.map[key], self.dev_type, ts)) + s.append('%.2f' % + p[c+'_w']) + s.append('%.5f' % + p[c+'_wh']) + s.append('') + if len(s): + result = self._urlopen(self.url, s) + if result and result.read: + resp = result.read() + if not resp or resp.find('ACK') == -1: + wrnmsg('PP: upload failed: %s' % resp) + + def add_devices(self): + s = [] + for key in self.map.keys(): + s.append('' % + (self.map[key], self.dev_type)) + if len(s): + result = self._urlopen(self.url, s) + if result and result.read: + resp = result.read() + if not resp or resp.find('ACK') == -1: + wrnmsg('PP: add devices failed: %s' % resp) + + def _urlopen(self, url, s): + s.insert(0, '') + s.insert(1, '' % + (self.hub_id, self.nonce)) + s.append('') + result = super(PeoplePowerProcessor, self)._urlopen(url, ''.join(s)) + self.nonce += 1 + return result + + def _handle_urlopen_error(self, e, url, payload): + errmsg(''.join(['%s Error: %s' % (self.__class__.__name__, e), + '\n URL: ' + url, + '\n token: ' + self.token, + '\n hub ID: ' + self.hub_id, + '\n data: ' + payload])) + + def _create_request(self, url): + req = super(PeoplePowerProcessor, self)._create_request(url) + req.add_header("Content-Type", "text/xml") + req.add_header("PPCAuthorization", "esp token=%s" % self.token) + return req + + +class EragyProcessor(UploadProcessor): + def __init__(self, url, gateway_id, token, period, timeout): + super(EragyProcessor, self).__init__() + self.url = url + self.gateway_id = gateway_id + self.token = token + self.process_period = int(period) + self.timeout = int(timeout) + + infmsg('EG: upload period: %d' % self.process_period) + infmsg('EG: url: ' + self.url) + infmsg('EG: gateway ID: ' + self.gateway_id) + infmsg('EG: token: ' + self.token) + + def setup(self): + if not (self.url and self.gateway_id and self.token): + print 'Eragy Error: Insufficient parameters' + if not self.url: + print ' No URL' + if not self.gateway_id: + print ' No gateway ID' + if not self.token: + print ' No token' + sys.exit(1) + + def 
process_calculated(self, packets): + s = [] + for p in packets: + for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PE_LABELS)): + key = mklabel(obfuscate_serial(p['serial']), c) + s.append('' % (key,p['time_created'],p[c+'_w'])) + if len(s): + s.insert(0, '' % + (self.gateway_id, self.token)) + s.append('') + result = self._urlopen(self.url, ''.join(s)) + if result and result.read: + resp = result.read() + if not resp == '0': + wrnmsg('EG: upload failed: %s' % resp) + + def _handle_urlopen_error(self, e, url, payload): + errmsg(''.join(['%s Error: %s' % (self.__class__.__name__, e), + '\n URL: ' + url, + '\n token: ' + self.token, + '\n data: ' + payload])) + + def _create_request(self, url): + req = super(EragyProcessor, self)._create_request(url) + req.add_header("Content-Type", "text/xml") + return req + + +# smart energy groups expects delta measurements for both power and energy. +# this is not a cumulative energy reading! +class SmartEnergyGroupsProcessor(UploadProcessor): + def __init__(self, url, token, map_str, period, timeout): + super(SmartEnergyGroupsProcessor, self).__init__() + self.url = url + self.token = token + self.map_str = map_str + self.process_period = int(period) + self.timeout = int(timeout) + self.map = dict() + + infmsg('SEG: upload period: %d' % self.process_period) + infmsg('SEG: url: %s' % self.url) + infmsg('SEG: token: %s' % self.token) + infmsg('SEG: map: %s' % self.map_str) + + def setup(self): + if not (self.url and self.token): + print 'SmartEnergyGroups Error: Insufficient parameters' + if not self.url: + print ' No URL' + if not self.token: + print ' No token' + sys.exit(1) + self.map = pairs2dict(self.map_str.lower()) + self.urlopener = urllib2.build_opener(urllib2.HTTPHandler) + + def process_calculated(self, packets): + nodes = [] + for p in packets: + s = [] + if self.map: + for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PE_LABELS)): + key = mklabel(p['serial'], c) # str(idx+1) + if key in self.map: + meter = self.map[key] or c + s.append('(p_%s %.2f)' % (meter, p[c+'_w'])) + s.append('(e_%s %.5f)' % (meter, p[c+'_dwh'])) + for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PULSE)): + key = mklabel(p['serial'], c) # str(idx+1) + if key in self.map: + meter = self.map[key] or c + s.append('(n_%s %d)' % (meter,p[c])) + for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_SENSOR)): + key = mklabel(p['serial'], c) # str(idx+1) + if key in self.map: + meter = self.map[key] or c + s.append('(temperature_%s %.2f)' % (meter, p[c])) + else: + for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PE_LABELS)): + meter = c # str(idx+1) + s.append('(p_%s %.2f)' % (meter, p[c+'_w'])) + s.append('(e_%s %.5f)' % (meter, p[c+'_dwh'])) + for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PULSE)): + meter = c # str(idx+1) + s.append('(n_%s %d)' % (meter,p[c])) + for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_SENSOR)): + meter = c # str(idx+1) + s.append('(temperature_%s %.2f)' % (meter, p[c])) + if len(s): + ts = mkts(p['time_created']) + node = obfuscate_serial(p['serial']) + s.insert(0, '(node %s %s ' % (node, ts)) + s.append(')') + nodes.append(''.join(s)) + if len(nodes): + nodes.insert(0, 'data_post=(site %s ' % self.token) + nodes.append(')') + result = self._urlopen(self.url, ''.join(nodes)) + if result and result.read: + resp = result.read() + resp = resp.replace('\n', '') + if not resp == '(status ok)': + wrnmsg('SEG: upload failed: %s' % resp) + + def _handle_urlopen_error(self, e, url, payload): + errmsg(''.join(['%s Error: %s' % 
(self.__class__.__name__, e),
+                        '\n URL: ' + url,
+                        '\n token: ' + self.token,
+                        '\n data: ' + payload]))
+
+    def _create_request(self, url):
+        req = super(SmartEnergyGroupsProcessor, self)._create_request(url)
+        req.get_method = lambda: 'PUT'
+        return req
+
+
+class ThingSpeakProcessor(UploadProcessor):
+    def __init__(self, url, tokens, fields, period, timeout):
+        super(ThingSpeakProcessor, self).__init__()
+        self.url = url
+        self.tokens_str = tokens
+        self.fields_str = fields
+        self.process_period = int(period)
+        self.timeout = int(timeout)
+        self.tokens = dict()
+        self.fields = dict()
+
+        infmsg('TS: upload period: %d' % self.process_period)
+        infmsg('TS: url: %s' % self.url)
+        infmsg('TS: tokens: %s' % self.tokens_str)
+        infmsg('TS: fields: %s' % self.fields_str)
+
+    def setup(self):
+        if not (self.url and self.tokens_str):
+            print 'ThingSpeak Error: Insufficient parameters'
+            if not self.url:
+                print ' No URL'
+            if not self.tokens_str:
+                print ' No tokens'
+            sys.exit(1)
+        self.tokens = pairs2dict(self.tokens_str)
+        self.fields = pairs2dict(self.fields_str)
+
+    def process_calculated(self, packets):
+        for p in packets:
+            ecm_serial = p['serial']
+            if ecm_serial in self.tokens:
+                token = self.tokens[ecm_serial]
+                s = []
+                for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PE_LABELS)):
+                    key = mklabel(ecm_serial, c)
+                    if not self.fields:
+                        s.append('&field%d=%.2f' % (idx+1, p[c+'_w']))
+                    elif key in self.fields:
+                        s.append('&field%s=%.2f' % (self.fields[key], p[c+'_w']))
+                if len(s):
+                    s.insert(0, 'key=%s' % token)
+                    s.insert(1, '&datetime=%s' % mkts(p['time_created']))
+                    result = self._urlopen(self.url, ''.join(s))
+                    if result and result.read:
+                        resp = result.read()
+                        if resp == '0':  # thingspeak returns 0 when an update fails
+                            wrnmsg('TS: upload failed for %s: %s' % (ecm_serial, resp))
+                    else:
+                        wrnmsg('TS: upload failed for %s' % ecm_serial)
+            else:
+                wrnmsg('TS: no token defined for %s' % ecm_serial)
+
+
+class PachubeProcessor(UploadProcessor):
+    def __init__(self, url, token, feed, period, timeout):
+        super(PachubeProcessor, self).__init__()
+        self.url = url
+        self.token = token
+        self.feed = feed
+        self.process_period = int(period)
+        self.timeout = int(timeout)
+
+        infmsg('PBE: upload period: %d' % self.process_period)
+        infmsg('PBE: url: %s' % self.url)
+        infmsg('PBE: token: %s' % self.token)
+        infmsg('PBE: feed: %s' % self.feed)
+
+    def setup(self):
+        if not (self.url and self.token and self.feed):
+            print 'Pachube Error: Insufficient parameters'
+            if not self.url:
+                print ' A URL is required'
+            if not self.token:
+                print ' A token is required'
+            if not self.feed:
+                print ' A feed is required'
+            sys.exit(1)
+
+    def process_calculated(self, packets):
+        streams = dict()
+        for p in packets:
+            osn = obfuscate_serial(p['serial'])
+            ts = mkts(p['time_created'])
+            for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PE_LABELS)):
+                dskey = mklabel(osn, c)
+                if dskey not in streams:
+                    streams[dskey] = {'id': dskey, 'datapoints': []}
+                dp = {'at': ts, 'value': p[c+'_w']}
+                streams[dskey]['datapoints'].append(dp)
+            for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PULSE)):
+                dskey = mklabel(osn, c)
+                if dskey not in streams:
+                    streams[dskey] = {'id': dskey, 'datapoints': []}
+                dp = {'at': ts, 'value': p[c]}
+                streams[dskey]['datapoints'].append(dp)
+            for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_SENSOR)):
+                dskey = mklabel(osn, c)
+                if dskey not in streams:
+                    streams[dskey] = {'id': dskey, 'datapoints': []}
+                dp = {'at': ts, 'value': p[c]}
+                streams[dskey]['datapoints'].append(dp)
+        if len(streams.keys()) > 0:
+            data = {'version': '1.0.0', 'datastreams': []}
+            for key in streams.keys():
+                data['datastreams'].append(streams[key])
+            url = '%s/%s' % (self.url, self.feed)
+            result = self._urlopen(url, json.dumps(data))
+            # FIXME: need error handling here
+
+    def _create_request(self, url):
+        req = super(PachubeProcessor, self)._create_request(url)
+        req.add_header('X-PachubeApiKey', self.token)
+        req.get_method = lambda: 'PUT'
+        return req
+
+    def _handle_urlopen_error(self, e, url, payload):
+        errmsg(''.join(['%s Error: %s' % (self.__class__.__name__, e),
+                        '\n URL: ' + url,
+                        '\n token: ' + self.token,
+                        '\n data: ' + payload]))
+
+
+class OpenEnergyMonitorProcessor(UploadProcessor):
+    def __init__(self, url, token, node, period, timeout):
+        super(OpenEnergyMonitorProcessor, self).__init__()
+        self.url = url
+        self.token = token
+        self.node = node
+        self.process_period = int(period)
+        self.timeout = int(timeout)
+
+        infmsg('OEM: upload period: %d' % self.process_period)
+        infmsg('OEM: timeout: %d' % self.timeout)
+        infmsg('OEM: url: %s' % self.url)
+        infmsg('OEM: token: %s' % self.token)
+        infmsg('OEM: node: %s' % self.node)
+
+    def setup(self):
+        if not (self.url and self.token):
+            print 'OpenEnergyMonitor Error: Insufficient parameters'
+            if not self.url:
+                print ' A URL is required'
+            if not self.token:
+                print ' A token is required'
+            sys.exit(1)
+
+    def process_calculated(self, packets):
+        for p in packets:
+            osn = obfuscate_serial(p['serial'])
+            data = []
+            # each entry must be a bare key:value pair, since the joined
+            # list is embedded in the json={...} query parameter below
+            data.append('%s:%.1f' % (mklabel(osn, 'volts'), p['volts']))
+            if INCLUDE_CURRENT:
+                for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_CURRENT)):
+                    data.append('%s:%.2f' % (mklabel(osn, c), p[c]))
+            for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PE_LABELS)):
+                data.append('%s_w:%.2f' % (mklabel(osn, c), p[c+'_w']))
+            for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PE_LABELS)):
+                data.append('%s_wh:%.2f' % (mklabel(osn, c), p[c+'_wh']))
+            for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_PULSE)):
+                data.append('%s:%d' % (mklabel(osn, c), p[c]))
+            for idx, c in enumerate(PACKET_FORMAT.channels(FILTER_SENSOR)):
+                data.append('%s:%.2f' % (mklabel(osn, c), p[c]))
+            if len(data):
+                nstr = '' if self.node is None else '&node=%s' % self.node
+                url = '%s?apikey=%s&time=%s%s&json={%s}' % (
+                    self.url, self.token, p['time_created'], nstr,
+                    ','.join(data))
+                result = self._urlopen(url, '')
+                # FIXME: need error handling here
+
+    def _create_request(self, url):
+        req = super(OpenEnergyMonitorProcessor, self)._create_request(url)
+        return req
+
+    def _handle_urlopen_error(self, e, url, payload):
+        errmsg(''.join(['%s Error: %s' % (self.__class__.__name__, e),
+                        '\n URL: ' + url,
+                        '\n token: ' + self.token,
+                        '\n data: ' + payload]))
+
+
+class WattvisionProcessor(UploadProcessor):
+    def __init__(self, url, api_id, api_key, sensor_id, channelstr, period, timeout):
+        super(WattvisionProcessor, self).__init__()
+        self.url = url
+        self.api_id = api_id
+        self.api_key = api_key
+        self.sensor_id = sensor_id
+        self.channelstr = channelstr
+        self.process_period = int(period)
+        self.timeout = int(timeout)
+        self.serial = ''
+        self.channel = ''
+
+        infmsg('WV: upload period: %d' % self.process_period)
+        infmsg('WV: timeout: %d' % self.timeout)
+        infmsg('WV: url: %s' % self.url)
+        infmsg('WV: api_id: %s' % self.api_id)
+        infmsg('WV: api_key: %s' % self.api_key)
+        infmsg('WV: sensor_id: %s' % self.sensor_id)
+        infmsg('WV: channel: %s' % self.channelstr)
+
+    def setup(self):
+        if not (self.url and self.api_id and self.api_key and self.sensor_id and
self.channelstr): + print 'Wattvision Error: Insufficient parameters' + if not self.url: + print ' A URL is required' + if not self.api_id: + print ' An API ID is required' + if not self.api_key: + print ' An API key is required' + if not self.sensor_id: + print ' A Sensor ID is required' + if not self.channelstr: + print ' A channel is required' + sys.exit(1) + idx = self.channelstr.find('_') + if idx == -1: + print 'bad format for channel. expecting XXXXXX_chY' + sys.exit(1) + self.serial = self.channelstr[0:idx] + self.channel = self.channelstr[idx + 1:] + + def process_calculated(self, packets): + for p in packets: + if p['serial'] == self.serial: + ts = mkts(p['time_created']) + data = { + 'sensor_id': self.sensor_id, + 'api_id': self.api_id, + 'api_key': self.api_key, + 'watts': p[self.channel+'_w'], + 'time': ts + } + result = self._urlopen(self.url, json.dumps(data)) + # FIXME: need error handling here + + def _create_request(self, url): + req = super(WattvisionProcessor, self)._create_request(url) + req.get_method = lambda: 'POST' + return req + + def _handle_urlopen_error(self, e, url, payload): + errmsg(''.join(['%s Error: %s' % (self.__class__.__name__, e), + '\n URL: ' + url, + '\n api_id: ' + self.api_id, + '\n api_key: ' + self.api_key, + '\n sensor_id: ' + self.sensor_id, + '\n data: ' + payload])) + + +class PVOutputProcessor(UploadProcessor): + def __init__(self, url, api_key, system_id, gen_str, con_str, temp_str, period, timeout): + super(PVOutputProcessor, self).__init__() + self.url = url + self.api_key = api_key + self.system_id = system_id + self.gen_str = gen_str + self.con_str = con_str + self.temp_str = temp_str + self.process_period = int(period) + self.timeout = int(timeout) + + self.gen_ch = None + self.gen_serial = None + self.con_ch = None + self.con_serial = None + self.temp_ch = None + self.temp_serial = None + + infmsg('PVO: upload period: %d' % self.process_period) + infmsg('PVO: timeout: %d' % self.timeout) + infmsg('PVO: url: %s' % self.url) + infmsg('PVO: api_key: %s' % self.api_key) + infmsg('PVO: system_id: %s' % self.system_id) + infmsg('PVO: generation channel: %s' % self.gen_str) + infmsg('PVO: consumption channel: %s' % self.con_str) + infmsg('PVO: temperature channel: %s' % self.temp_str) + + def setup(self): + if not (self.url and self.api_key and self.system_id): + print 'PVOutput Error: Insufficient parameters' + if not self.url: + print ' A URL is required' + if not self.api_key: + print ' An API Key is required' + if not self.system_id: + print ' A system ID is required' + sys.exit(1) + [self.gen_ch, self.gen_serial] = self._split(self.gen_str) + [self.con_ch, self.con_serial] = self._split(self.con_str) + [self.temp_ch, self.temp_serial] = self._split(self.temp_str) + + def _split(self, s): + sn = None + ch = s + idx = s.find('_') + if idx >= 0: + sn = s[0:idx] + ch = s[idx+1:] + return [ch, sn] + + def _havegen(self, sn): + return self.gen_ch and (not self.gen_serial or self.gen_serial == sn) + + def _havecon(self, sn): + return self.con_ch and (not self.con_serial or self.con_serial == sn) + + def _havetemp(self, sn): + return self.temp_ch and (not self.temp_serial or self.temp_serial == sn) + + def process_calculated(self, packets): + p = packets[len(packets)-1] + if self._havegen(p['serial']) or self._havecon(p['serial']): + data = dict() + data['d'] = time.strftime('%Y%m%d', time.localtime(p['time_created'])) + data['t'] = time.strftime('%H:%M', time.localtime(p['time_created'])) + data['c1'] = 1 + data['v6'] = p['volts'] + if 
self._havegen(p['serial']): + data['v1'] = p[self.gen_ch+'_wh'] + data['v2'] = p[self.gen_ch+'_w'] + if self._havecon(p['serial']): + data['v3'] = p[self.con_ch+'_wh'] + data['v4'] = p[self.con_ch+'_w'] + if self._havetemp(p['serial']): + data['v5'] = p[self.temp_ch] + result = self._urlopen(self.url, urllib.urlencode(data)) + # FIXME: need error handling here + + def _create_request(self, url): + req = super(PVOutputProcessor, self)._create_request(url) + req.add_header('X-Pvoutput-Apikey', self.api_key) + req.add_header('X-Pvoutput-SystemId', self.system_id) + req.get_method = lambda: 'POST' + return req + + def _handle_urlopen_error(self, e, url, payload): + errmsg(''.join(['%s Error: %s' % (self.__class__.__name__, e), + '\n URL: ' + url, + '\n api_key: ' + self.api_key, + '\n system_id: ' + self.system_id, + '\n data: ' + payload])) + + +class MQTTProcessor(BaseProcessor): + def __init__(self, host, port, clientid, base_topic, qos, retain, + will, user, passwd, tls, map, period): + if not publish: + print 'MQTT Error: paho.mqtt.publish module could not be imported.' + sys.exit(1) + + super(MQTTProcessor, self).__init__() + self.host = host + self.port = int(port) + self.clientid = clientid + self.base_topic = base_topic + self.qos = int(qos) + self.retain = retain + self.will = will + self.user = user + self.passwd = passwd + self.tls = tls + self.map_str = map + self.process_period = int(period) + + infmsg('MQTT: mqtt:%s:%d?clientid=%s' % + (self.host, self.port, self.clientid)) + infmsg('MQTT: user: %s' % (self.user or '')) + infmsg('MQTT: tls: %s' % (self.tls or '')) + infmsg('MQTT: topic: %s' % self.base_topic) + infmsg('MQTT: qos: %d' % self.qos) + infmsg('MQTT: retain: %s' % self.retain) + infmsg('MQTT: will: %s' % (self.will or '')) + infmsg('MQTT: upload period: %d' % self.process_period) + infmsg('MQTT: map: %s' % self.map_str) + + def setup(self): + if self.user == None and self.passwd != None: + print 'MQTT Error: mqtt-user must be provided if mqtt-passwd configured' + sys.exit(1) + if self.qos not in (0, 1, 2): + print 'MQTT Error: qos values are 0, 1 or 2' + sys.exit(1) + + self.map = pairs2dict(self.map_str) + if (self.user == None): + self.auth = None + else: + self.auth = {'username': self.user} + if self.passwd not in (None, ''): + self.auth['password'] = self.passwd + + try: + self.will = json.loads(self.will) if self.will else None + except Exception: + print 'MQTT Error: mqtt-will parameter must be valid JSON' + sys.exit(1) + + try: + self.tls = json.loads(self.tls) if self.tls else None + except Exception: + print 'MQTT Error: mqtt-tls parameter must be valid JSON' + sys.exit(1) + + def _add_msg(self, packet, channel, payload): + if not payload: + return + key = mklabel(packet['serial'], channel) + if key in self.map: + key = self.map[key] + self._msgs.append({'topic': '%s/%s' % (self.base_topic, key), + 'payload': round(payload, 3), + 'qos': self.qos, + 'retain': self.retain}) + + def process_calculated(self, packets): + self._msgs = [] + for p in packets: + self._add_msg(p, 'volts', p['volts']) + for f in [FILTER_POWER, FILTER_ENERGY, FILTER_PULSE, FILTER_SENSOR]: + for c in PACKET_FORMAT.channels(f): + self._add_msg(p, c, p[c]) + # Delta Wh + for c in PACKET_FORMAT.channels(FILTER_PE_LABELS): + self._add_msg(p, c+'_dwh', p[c+'_dwh']) + + if len(self._msgs): + dbgmsg('MQTT: len=%d, msgs=%s' % + (len(self._msgs), json.dumps(self._msgs))) + publish.multiple(self._msgs, hostname=self.host, port=self.port, + client_id=self.clientid, auth=self.auth, + will=self.will, 
tls=self.tls)
+            self._msgs = None
+        else:
+            dbgmsg('MQTT: Nothing to send')
+
+
+class InfluxDBProcessor(UploadProcessor):
+    def __init__(self, host, port, username, password, database, mode, measurement, map_str, tag_str, period, timeout, db_schema):
+        super(InfluxDBProcessor, self).__init__()
+        self.host = host
+        self.port = port
+        self.username = username
+        self.password = password
+        self.database = database
+        self.mode = mode
+        self.measurement = measurement
+        self.map_str = map_str
+        self.tag_str = tag_str
+        self.process_period = int(period)
+        self.timeout = int(timeout)
+        self.map = dict()
+        self.tags = dict()
+
+        if not db_schema:
+            self.db_schema = FILTER_DB_SCHEMA_COUNTERS
+        elif db_schema == DB_SCHEMA_COUNTERS:
+            self.db_schema = FILTER_DB_SCHEMA_COUNTERS
+        elif db_schema == DB_SCHEMA_ECMREAD:
+            self.db_schema = FILTER_DB_SCHEMA_ECMREAD
+        elif db_schema == DB_SCHEMA_ECMREADEXT:
+            self.db_schema = FILTER_DB_SCHEMA_ECMREADEXT
+        else:
+            print "Unsupported database schema '%s'" % db_schema
+            print 'supported schemas include:'
+            for fmt in DB_SCHEMAS:
+                print ' %s' % fmt
+            sys.exit(1)
+
+        infmsg('InfluxDB: upload period: %d' % self.process_period)
+        infmsg('InfluxDB: host: %s' % self.host)
+        infmsg('InfluxDB: port: %s' % self.port)
+        infmsg('InfluxDB: username: %s' % self.username)
+        infmsg('InfluxDB: map: %s' % self.map_str)
+        infmsg('InfluxDB: schema: %s' % self.db_schema)
+
+    def setup(self):
+        self.map = pairs2dict(self.map_str)
+        self.tags = pairs2dict(self.tag_str)
+
+    def process_calculated(self, packets):
+        series = []
+        for p in packets:
+            dev_serial = obfuscate_serial(p['serial'])
+            for c in PACKET_FORMAT.channels(self.db_schema):
+                key = mklabel(p['serial'], c)
+                if self.map and key not in self.map:
+                    continue
+                values = {
+                    "measurement": self.measurement,
+                    "time": mkts(p['time_created']),
+                }
+                if self.mode == 'col':
+                    values['fields'] = {
+                        'value': p[c] * 1.0,
+                    }
+                    values['tags'] = {
+                        "serial": dev_serial,
+                        "id": self.map[key] if key in self.map else c,
+                    }
+                else:
+                    value_name = self.map[key] if key in self.map else mklabel(dev_serial, c)
+                    values['fields'] = {}
+                    values['fields'][value_name] = p[c] * 1.0
+                series.append(values)
+        client = InfluxDBClient(self.host, self.port, self.username, self.password, self.database)
+        try:
+            # create the database if it does not yet exist; the error raised
+            # when it already exists is safe to ignore
+            client.create_database(self.database)
+        except Exception:
+            pass
+        client.write_points(series, tags=self.tags)
+
+
+if __name__ == '__main__':
+    parser = optparse.OptionParser(version=__version__)
+
+    parser.add_option('-c', '--config-file', dest='configfile', help='read configuration from FILE', metavar='FILE')
+    parser.add_option('-p', '--print', action='store_true', dest='print_out', default=False, help='print data to screen')
+    parser.add_option('-q', '--quiet', action='store_true', dest='quiet', default=False, help='quiet output')
+    parser.add_option('-v', '--verbose', action='store_false', dest='quiet', default=False, help='verbose output')
+    parser.add_option('--debug', action='store_true', default=False, help='debug output')
+    parser.add_option('--skip-upload', action='store_true', default=False, help='do not upload data but print what would happen')
+    parser.add_option('--buffer-size', help='number of packets to keep in cache', metavar='SIZE')
+    parser.add_option('--trust-device-clock', action='store_true', default=False, help='use device clock for packet timestamps')
+    parser.add_option('--utc-device-clock', action='store_true', dest='device_clock_is_utc', default=False, help='device clock is in UTC')
+    parser.add_option('--reverse-polarity', action='store_true', default=False, help='reverse polarity on all channels')
+    parser.add_option('--include-current', action='store_true', default=False, help='include and process current (amp) values from GEM')
+    parser.add_option('--device-list', help='comma-separated list of device identifiers', metavar='LIST')
+    parser.add_option('--full-serials', action='store_true', default=False, help='show full serial numbers instead of XXX123')
+
+    parser.add_option('--device-type', help='device types include '+', '.join(DEVICE_TYPES)+'; default is '+DEFAULT_DEVICE_TYPE, metavar='TYPE')
+    parser.add_option('--packet-format', help='formats include '+', '.join(PACKET_FORMATS), metavar='FMT')
+    parser.add_option('--db-schema', help='schemas include '+', '.join(DB_SCHEMAS)+'; default is '+DEFAULT_DB_SCHEMA, metavar='SCHEMA')
+
+    group = optparse.OptionGroup(parser, 'database setup options')
+    group.add_option('--mysql-config', action='store_true', default=False, help='configure mysql database')
+    group.add_option('--sqlite-config', action='store_true', default=False, help='configure sqlite database')
+    parser.add_option_group(group)
+
+    group = optparse.OptionGroup(parser, 'serial source options')
+    group.add_option('--serial', action='store_true', dest='serial_read', default=False, help='read from serial port')
+    group.add_option('--serial-port', help='serial port', metavar='PORT')
+    group.add_option('--serial-baud', help='serial baud rate', metavar='RATE')
+    group.add_option('--serial-poll-interval', help='how often to poll the device for data, 0 indicates block for data; default is 0', metavar='PERIOD')
+    parser.add_option_group(group)
+
+    group = optparse.OptionGroup(parser, 'tcp/ip source options')
+    group.add_option('--ip', action='store_true', dest='ip_read', default=False, help='read from tcp/ip source such as WIZnet or EtherBee')
+    group.add_option('--ip-host', help='ip host', metavar='HOSTNAME')
+    group.add_option('--ip-port', help='ip port', metavar='PORT')
+    group.add_option('--ip-mode', help='act as client or server', metavar='MODE')
+    group.add_option('--ip-poll-interval', help='for client mode, how often to poll the device for data, 0 indicates block for data; default is 0', metavar='PERIOD')
+    parser.add_option_group(group)
+
+    group = optparse.OptionGroup(parser, 'mysql source options')
+    group.add_option('--mysql-src', action='store_true', dest='mysql_read', default=False, help='read from mysql database')
+    group.add_option('--mysql-src-host', help='source database host', metavar='HOSTNAME')
+    group.add_option('--mysql-src-user', help='source database user', metavar='USERNAME')
+    group.add_option('--mysql-src-passwd', help='source database password', metavar='PASSWORD')
+    group.add_option('--mysql-src-database', help='source database name', metavar='DATABASE')
+    group.add_option('--mysql-src-table', help='source database table', metavar='TABLE')
+    group.add_option('--mysql-poll-interval', help='how often to poll the database in seconds', metavar='PERIOD')
+    parser.add_option_group(group)
+
+    group = optparse.OptionGroup(parser, 'sqlite source options')
+    group.add_option('--sqlite-src', action='store_true', dest='sqlite_read', default=False, help='read from sqlite database')
+    group.add_option('--sqlite-src-file', help='source database file', metavar='FILE')
+    group.add_option('--sqlite-src-table', help='source database table', metavar='TABLE')
+    group.add_option('--sqlite-poll-interval', help='how often to poll the database in seconds', metavar='PERIOD')
+
parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'rrd source options') + group.add_option('--rrd-src', action='store_true', dest='rrd_read', default=False, help='read from rrd') + group.add_option('--rrd-src-dir', help='directory for rrd files', metavar='DIR') + group.add_option('--rrd-src-step', help='step size in seconds', metavar='STEP') + group.add_option('--rrd-poll-interval', help='how often to poll the rrd in seconds', metavar='PERIOD') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'mysql options') + group.add_option('--mysql', action='store_true', dest='mysql_out', default=False, help='write data to mysql database') + group.add_option('--mysql-host', help='database host', metavar='HOSTNAME') + group.add_option('--mysql-user', help='database user', metavar='USERNAME') + group.add_option('--mysql-passwd', help='database password', metavar='PASSWORD') + group.add_option('--mysql-database', help='database name', metavar='DATABASE') + group.add_option('--mysql-table', help='database table', metavar='TABLE') + group.add_option('--mysql-insert-period', help='database insert period in seconds', metavar='PERIOD') + group.add_option('--mysql-persistent-connection', action='store_true', default=False, help='maintain a persistent connection to database') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'sqlite options') + group.add_option('--sqlite', action='store_true', dest='sqlite_out', default=False, help='write data to sqlite database') + group.add_option('--sqlite-file', help='database filename', metavar='FILE') + group.add_option('--sqlite-table', help='database table', metavar='TABLE') + group.add_option('--sqlite-insert-period', help='database insert period in seconds', metavar='PERIOD') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'rrd options') + group.add_option('--rrd', action='store_true', dest='rrd_out', default=False, help='write data to round-robin database') + group.add_option('--rrd-dir', help='directory for rrd files', metavar='DIR') + group.add_option('--rrd-step', help='step size in seconds', metavar='STEP') + group.add_option('--rrd-heartbeat', help='heartbeat in seconds', metavar='HEARTBEAT') + group.add_option('--rrd-update-period', help='update period in seconds', metavar='PERIOD') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'WattzOn options') + group.add_option('--wattzon', action='store_true', dest='wattzon_out', default=False, help='upload data using WattzOn API') + group.add_option('--wo-user', help='username', metavar='USERNAME') + group.add_option('--wo-pass', help='password', metavar='PASSWORD') + group.add_option('--wo-api-key', help='API key', metavar='KEY') + group.add_option('--wo-map', help='channel-to-meter mapping', metavar='MAP') + group.add_option('--wo-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--wo-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'PlotWatt options') + group.add_option('--plotwatt', action='store_true', dest='plotwatt_out', default=False, help='upload data using PlotWatt API') + group.add_option('--pw-house-id', help='house ID', metavar='ID') + group.add_option('--pw-api-key', help='API key', metavar='KEY') + group.add_option('--pw-map', help='channel-to-meter mapping', metavar='MAP') + group.add_option('--pw-upload-period', help='upload period in seconds', 
metavar='PERIOD') + group.add_option('--pw-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'EnerSave options') + group.add_option('--enersave', action='store_true', dest='enersave_out', default=False, help='upload data using EnerSave API (deprecated)') + group.add_option('--es-token', help='token', metavar='TOKEN') + group.add_option('--es-url', help='URL', metavar='URL') + group.add_option('--es-map', help='channel-to-device mapping', metavar='MAP') + group.add_option('--es-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--es-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'Bidgely options') + group.add_option('--bidgely', action='store_true', dest='bidgely_out', default=False, help='upload data using Bidgely API') + group.add_option('--by-url', help='URL', metavar='URL') + group.add_option('--by-map', help='channel-to-device mapping', metavar='MAP') + group.add_option('--by-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--by-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'PeoplePower options') + group.add_option('--peoplepower', action='store_true', dest='peoplepower_out', default=False, help='upload data using PeoplePower API') + group.add_option('--pp-token', help='auth token', metavar='TOKEN') + group.add_option('--pp-hub-id', help='hub ID', metavar='ID') + group.add_option('--pp-url', help='URL', metavar='URL') + group.add_option('--pp-map', help='channel-to-device mapping', metavar='MAP') + group.add_option('--pp-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--pp-timeout', help='timeout period in seconds', metavar='TIMEOUT') + group.add_option('--pp-add-devices', help='add devices on startup') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'Eragy options') + group.add_option('--eragy', action='store_true', dest='eragy_out', default=False, help='upload data using Eragy API') + group.add_option('--eg-gateway-id', help='gateway id', metavar='ID') + group.add_option('--eg-token', help='token', metavar='TOKEN') + group.add_option('--eg-url', help='URL', metavar='URL') + group.add_option('--eg-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--eg-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'Smart Energy Groups options') + group.add_option('--smartenergygroups', action='store_true', dest='smartenergygroups_out', default=False, help='upload data using SmartEnergyGroups API') + group.add_option('--seg-token', help='token', metavar='TOKEN') + group.add_option('--seg-url', help='URL', metavar='URL') + group.add_option('--seg-map', help='channel-to-device mapping', metavar='MAP') + group.add_option('--seg-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--seg-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'ThingSpeak options') + group.add_option('--thingspeak', action='store_true', dest='thingspeak_out', default=False, help='upload data using ThingSpeak API') + group.add_option('--ts-url', help='URL', metavar='URL') + 
group.add_option('--ts-tokens', help='ECM-to-ID/token mapping', metavar='TOKENS') + group.add_option('--ts-fields', help='channel-to-field mapping', metavar='FIELDS') + group.add_option('--ts-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--ts-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'Pachube options') + group.add_option('--pachube', action='store_true', dest='pachube_out', default=False, help='upload data using Pachube API') + group.add_option('--pbe-url', help='URL', metavar='URL') + group.add_option('--pbe-token', help='token', metavar='TOKEN') + group.add_option('--pbe-feed', help='feed', metavar='FEED') + group.add_option('--pbe-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--pbe-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'OpenEnergyMonitor options') + group.add_option('--oem', action='store_true', dest='oem_out', default=False, help='upload data using OpenEnergyMonitor API') + group.add_option('--oem-url', help='URL', metavar='URL') + group.add_option('--oem-token', help='token', metavar='TOKEN') + group.add_option('--oem-node', help='node', metavar='NODE') + group.add_option('--oem-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--oem-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'Wattvision options') + group.add_option('--wattvision', action='store_true', dest='wattvision_out', default=False, help='upload data using Wattvision API') + group.add_option('--wv-url', help='URL', metavar='URL') + group.add_option('--wv-api-id', help='api id', metavar='ID') + group.add_option('--wv-api-key', help='api key', metavar='KEY') + group.add_option('--wv-sensor-id', help='sensor id', metavar='SENSOR') + group.add_option('--wv-channel', help='channel of device', metavar='XXXXXX_cY') + group.add_option('--wv-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--wv-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'PVOutput options') + group.add_option('--pvo', action='store_true', dest='pvo_out', default=False, help='upload data using PVOutput API') + group.add_option('--pvo-url', help='URL', metavar='URL') + group.add_option('--pvo-api-key', help='key', metavar='KEY') + group.add_option('--pvo-system-id', help='id', metavar='ID') + group.add_option('--pvo-generation-channel', help='channel that monitors power/energy generation') + group.add_option('--pvo-consumption-channel', help='channel that monitors power/energy consumption') + group.add_option('--pvo-temperature-channel', help='channel that monitors temperature') + group.add_option('--pvo-upload-period', help='upload period in seconds', metavar='PERIOD') + group.add_option('--pvo-timeout', help='timeout period in seconds', metavar='TIMEOUT') + parser.add_option_group(group) + + group = optparse.OptionGroup(parser, 'MQTT options') + group.add_option('--mqtt', action='store_true', dest='mqtt_out', default=False, help='upload data using MQTT API') + group.add_option('--mqtt-host', help='mqtt host', metavar='HOSTNAME') + group.add_option('--mqtt-port', type='int', help='mqtt port', metavar='PORT') + 
group.add_option('--mqtt-clientid', help='client-id', metavar='CLIENTID')
+    group.add_option('--mqtt-base-topic', help='base topic', metavar='TOPIC')
+    group.add_option('--mqtt-qos', type='int', help='quality of service', metavar='QOS')
+    group.add_option('--mqtt-retain', action='store_true', help='retain msg as last good value')
+    group.add_option('--mqtt-will', help='mqtt will for the client', metavar='{"topic": "<topic>", "payload": "<payload>", "qos": <qos>, "retain": <retain>}')
+    group.add_option('--mqtt-user', help='user', metavar='USERNAME')
+    group.add_option('--mqtt-passwd', help='password', metavar='PASSWORD')
+    group.add_option('--mqtt-tls', help='tls credentials', metavar='{"ca_certs": "<ca_certs>", "certfile": "<certfile>", "keyfile": "<keyfile>", "tls_version": "<tls_version>", "ciphers": "<ciphers>"}')
+    group.add_option('--mqtt-map', help='channel-to-topic mapping', metavar='<channel1>,<topic1>,...,<channelN>,<topicN>')
+    group.add_option('--mqtt-upload-period', type='int', help='upload period in seconds', metavar='PERIOD')
+    parser.add_option_group(group)
+
+    group = optparse.OptionGroup(parser, 'InfluxDB options')
+    group.add_option('--influxdb', action='store_true', dest='influxdb_out', default=False, help='upload data to InfluxDB')
+    group.add_option('--influxdb-username', help='username', metavar='USERNAME')
+    group.add_option('--influxdb-password', help='password', metavar='PASSWORD')
+    group.add_option('--influxdb-host', help='database host', metavar='HOST')
+    group.add_option('--influxdb-port', help='database port', metavar='PORT')
+    group.add_option('--influxdb-database', help='database name', metavar='DATABASE')
+    group.add_option('--influxdb-mode', choices=['row', 'col'], help='row (1 series w/ many values) or col (many series w/ 1 value each)', metavar='MODE')
+    group.add_option('--influxdb-measurement', help='measurement name', metavar='MEASUREMENT')
+    group.add_option('--influxdb-map', help='channel-to-device mapping', metavar='MAP')
+    group.add_option('--influxdb-tags', help='map of shared tags to add (a,b,c,d adds tag a with value b, tag c with value d)', metavar='MAP')
+    group.add_option('--influxdb-upload-period', help='upload period in seconds', metavar='PERIOD')
+    group.add_option('--influxdb-timeout', help='timeout period in seconds', metavar='TIMEOUT')
+    group.add_option('--influxdb-db-schema', help='selected database schema', metavar='DB_SCHEMA')
+    parser.add_option_group(group)
+
+    (options, args) = parser.parse_args()
+
+    if options.quiet:
+        LOGLEVEL = LOG_ERROR
+    if options.debug:
+        LOGLEVEL = LOG_DEBUG
+
+    # if there is a configuration file, read the parameters from file and set
+    # values on the options object.
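+    #
+    # command-line values take precedence: a parameter from the file is only
+    # applied when the corresponding option was not already set on the
+    # command line.  section names are ignored, so parameters may be grouped
+    # freely.  a minimal hypothetical example:
+    #
+    #   [output]
+    #   print_out = true
+    #   buffer_size = 120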
+ if options.configfile: + if not ConfigParser: + print 'ConfigParser not loaded, cannot parse config file' + sys.exit(1) + config = ConfigParser.ConfigParser() + try: + config.read(options.configfile) + for section in config.sections(): # section names do not matter + for name, value in config.items(section): + if not getattr(options, name): + setattr(options, name, cleanvalue(value)) + except AttributeError, e: + print 'unknown parameter in config file: %s' % e + sys.exit(1) + except Exception, e: + print e + sys.exit(1) + + infmsg('btmon: %s' % __version__) + infmsg('python: %s' % sys.version) + infmsg('platform: %s' % sys.platform) + + if options.skip_upload: + SKIP_UPLOAD = 1 + if options.trust_device_clock: + TRUST_DEVICE_CLOCK = 1 + if options.device_clock_is_utc: + DEVICE_CLOCK_IS_UTC = 1 + if options.reverse_polarity: + REVERSE_POLARITY = 1 + infmsg('polarity is reversed') + if options.include_current: + INCLUDE_CURRENT = 1 + if options.full_serials: + OBFUSCATE_SERIALS = 0 + + if not options.buffer_size: + options.buffer_size = DEFAULT_BUFFER_SIZE + BUFFER_SIZE = int(options.buffer_size) + + if not options.device_type: + options.device_type = DEFAULT_DEVICE_TYPE + if options.device_type == DEV_ECM1220: + DEVICE_TYPE = ECM1220Device() + elif options.device_type == DEV_ECM1240: + DEVICE_TYPE = ECM1240Device() + elif options.device_type == DEV_GEM: + DEVICE_TYPE = GEMDevice() + else: + print "Unsupported device type '%s'" % options.device_type + print 'supported device types include:' + for dev in DEVICE_TYPES: + print ' %s' % dev + sys.exit(1) + infmsg('device type: %s' % DEVICE_TYPE.NAME) + + if not options.device_list: + options.device_list = DEVICE_TYPE.DEFAULT_DEVICE_LIST + try: + DEVICE_TYPE.check_identifiers(options.device_list) + except Exception, e: + print e + sys.exit(1) + DEVICE_LIST = DEVICE_TYPE.extract_identifiers(options.device_list) + infmsg('device list: %s' % DEVICE_LIST) + + if not options.packet_format: + options.packet_format = DEVICE_TYPE.DEFAULT_PACKET_FORMAT + if options.packet_format == PF_ECM1240BIN: + PACKET_FORMAT = ECM1240BinaryPacket() + elif options.packet_format == PF_ECM1220BIN: + PACKET_FORMAT = ECM1220BinaryPacket() + elif options.packet_format == PF_GEM48PTBIN: + PACKET_FORMAT = GEM48PTBinaryPacket() + elif options.packet_format == PF_GEM48PBIN: + PACKET_FORMAT = GEM48PBinaryPacket() + else: + print "Unsupported packet format '%s'" % options.packet_format + print 'supported formats include:' + for fmt in PACKET_FORMATS: + print ' %s' % fmt + sys.exit(1) + infmsg('packet format: %s' % PACKET_FORMAT.NAME) + + if not options.db_schema: + options.db_schema = DEFAULT_DB_SCHEMA + if options.db_schema == DB_SCHEMA_COUNTERS: + SCHEMA = CountersSchema() + elif options.db_schema == DB_SCHEMA_ECMREAD: + SCHEMA = ECMReadSchema() + elif options.db_schema == DB_SCHEMA_ECMREADEXT: + SCHEMA = ECMReadExtSchema() + else: + print "Unsupported database schema '%s'" % options.db_schema + print 'supported schemas include:' + for fmt in DB_SCHEMAS: + print ' %s' % fmt + sys.exit(1) + infmsg('schema: %s' % SCHEMA.NAME) + + # determine the default table name + # we use a combination of the packet format and schema for the table name + # in order to minimize conflicts. 
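+    # for example, if PACKET_FORMAT.NAME were 'ecm1240bin' and SCHEMA.NAME
+    # were 'counters' (illustrative values; the actual names are defined on
+    # the packet-format and schema classes earlier in this file), then the
+    # default table name would be 'ecm1240bin_counters'.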
+ dbtable = '%s_%s' % (PACKET_FORMAT.NAME, SCHEMA.NAME) + + # Database Setup + # run the database configurator then exit + if options.mysql_config: + db = MySQLConfigurator(options.mysql_host or DB_HOST, + options.mysql_user or DB_USER, + options.mysql_passwd or DB_PASSWD, + options.mysql_database or DB_DATABASE, + options.mysql_table or dbtable) + db.configure() + sys.exit(0) + if options.sqlite_config: + db = SqliteConfigurator(options.sqlite_file or DB_FILENAME, + options.sqlite_table or dbtable) + db.configure() + sys.exit(0) + + # Data Collector setup + if options.serial_read: + if options.serial_poll_interval and options.serial_poll_interval > 0: + col = PollingSerialCollector(options.serial_port or SERIAL_PORT, + options.serial_baud or SERIAL_BAUD, + options.serial_poll_interval) + else: + col = BlockingSerialCollector(options.serial_port or SERIAL_PORT, + options.serial_baud or SERIAL_BAUD) + + elif options.ip_read: + if (options.ip_mode and not + (options.ip_mode == 'client' or options.ip_mode == 'server')): + print 'Unknown mode %s: use client or server' % options.ip_mode + sys.exit(1) + + mode = options.ip_mode or IP_DEFAULT_MODE + if mode == 'server': + col = SocketServerCollector(options.ip_host or IP_SERVER_HOST, + options.ip_port or IP_SERVER_PORT) + elif options.ip_poll_interval and options.ip_poll_interval > 0: + col = PollingSocketClientCollector(options.ip_host, + options.ip_port or IP_CLIENT_PORT, + options.ip_poll_interval) + else: + col = BlockingSocketClientCollector(options.ip_host, + options.ip_port or IP_CLIENT_PORT) + + elif options.mysql_read: + col = MySQLCollector(options.mysql_src_host or DB_HOST, + options.mysql_src_user or DB_USER, + options.mysql_src_passwd or DB_PASSWD, + options.mysql_src_database or DB_DATABASE, + options.mysql_src_table or dbtable, + options.mysql_poll_interval or DB_POLL_INTERVAL) + + elif options.sqlite_read: + col = SqliteCollector(options.sqlite_src_file or DB_FILENAME, + options.sqlite_src_table or dbtable, + options.sqlite_poll_interval or DB_POLL_INTERVAL) + + elif options.rrd_read: + col = RRDCollector(options.rrd_src_dir or RRD_DIR, + options.rrd_src_step or RRD_STEP, + options.rrd_poll_interval or RRD_POLL_INTERVAL) + + else: + print 'Please specify a data source (or \'-h\' for help):' + print ' --serial read from serial' + print ' --ip read from tcp/ip socket' + print ' --mysql-src read from mysql database' + print ' --sqlite-src read from sqlite database' + print ' --rrd-src read from rrd' + sys.exit(1) + + # Packet Processor Setup + if not (options.print_out or options.mysql_out or options.sqlite_out or + options.rrd_out or options.wattzon_out or options.plotwatt_out or + options.enersave_out or options.bidgely_out or + options.peoplepower_out or options.eragy_out or + options.smartenergygroups_out or options.thingspeak_out or + options.pachube_out or options.oem_out or + options.wattvision_out or options.pvo_out or options.mqtt_out or + options.influxdb_out): + print 'Please specify one or more processing options (or \'-h\' for help):' + print ' --print print to screen' + print ' --mysql write to mysql database' + print ' --sqlite write to sqlite database' + print ' --rrd write to round-robin database' + print ' --influxdb write to influxdb database' + print ' --bidgely upload to Bidgely' + print ' --enersave upload to EnerSave (deprecated)' + print ' --eragy upload to Eragy' + print ' --mqtt upload to MQTT broker' + print ' --oem upload to OpenEnergyMonitor' + print ' --pachube upload to Pachube' + print ' --peoplepower 
upload to PeoplePower' + print ' --plotwatt upload to PlotWatt' + print ' --pvo upload to PVOutput' + print ' --smartenergygroups upload to SmartEnergyGroups' + print ' --thingspeak upload to ThingSpeak' + print ' --wattvision upload to Wattvision' + print ' --wattzon upload to WattzOn' + sys.exit(1) + + procs = [] + + if options.print_out: + procs.append(PrintProcessor()) + if options.mysql_out: + procs.append(MySQLProcessor + (options.mysql_host or DB_HOST, + options.mysql_user or DB_USER, + options.mysql_passwd or DB_PASSWD, + options.mysql_database or DB_DATABASE, + options.mysql_table or dbtable, + options.mysql_insert_period or DB_INSERT_PERIOD, + options.mysql_persistent_connection or False)) + if options.sqlite_out: + procs.append(SqliteProcessor + (options.sqlite_file or DB_FILENAME, + options.sqlite_table or dbtable, + options.sqlite_insert_period or DB_INSERT_PERIOD)) + if options.rrd_out: + procs.append(RRDProcessor + (options.rrd_dir or RRD_DIR, + options.rrd_step or RRD_STEP, + options.rrd_heartbeat or RRD_HEARTBEAT, + options.rrd_update_period or RRD_UPDATE_PERIOD)) + if options.wattzon_out: + procs.append(WattzOnProcessor + (options.wo_api_key or WATTZON_API_KEY, + options.wo_user or WATTZON_USER, + options.wo_pass or WATTZON_PASS, + options.wo_map or WATTZON_MAP, + options.wo_upload_period or WATTZON_UPLOAD_PERIOD, + options.wo_timeout or WATTZON_TIMEOUT)) + if options.plotwatt_out: + procs.append(PlotWattProcessor + (options.pw_api_key or PLOTWATT_API_KEY, + options.pw_house_id or PLOTWATT_HOUSE_ID, + options.pw_map or PLOTWATT_MAP, + options.pw_upload_period or PLOTWATT_UPLOAD_PERIOD, + options.pw_timeout or PLOTWATT_TIMEOUT)) + if options.enersave_out: + procs.append(EnerSaveProcessor + (options.es_url or ES_URL, + options.es_token or ES_TOKEN, + options.es_map or ES_MAP, + options.es_upload_period or ES_UPLOAD_PERIOD, + options.es_timeout or ES_TIMEOUT)) + if options.bidgely_out: + procs.append(BidgelyProcessor + (options.by_url or BY_URL, + options.by_map or BY_MAP, + options.by_upload_period or BY_UPLOAD_PERIOD, + options.by_timeout or BY_TIMEOUT)) + if options.peoplepower_out: + procs.append(PeoplePowerProcessor + (options.pp_url or PPCO_URL, + options.pp_token or PPCO_TOKEN, + options.pp_hub_id or PPCO_HUBID, + options.pp_map or PPCO_MAP, + options.pp_upload_period or PPCO_UPLOAD_PERIOD, + options.pp_timeout or PPCO_TIMEOUT, + options.pp_add_devices)) + if options.eragy_out: + procs.append(EragyProcessor + (options.eg_url or ERAGY_URL, + options.eg_gateway_id or ERAGY_GATEWAY_ID, + options.eg_token or ERAGY_TOKEN, + options.eg_upload_period or ERAGY_UPLOAD_PERIOD, + options.eg_timeout or ERAGY_TIMEOUT)) + if options.smartenergygroups_out: + procs.append(SmartEnergyGroupsProcessor + (options.seg_url or SEG_URL, + options.seg_token or SEG_TOKEN, + options.seg_map or SEG_MAP, + options.seg_upload_period or SEG_UPLOAD_PERIOD, + options.seg_timeout or SEG_TIMEOUT)) + if options.thingspeak_out: + procs.append(ThingSpeakProcessor + (options.ts_url or TS_URL, + options.ts_tokens or TS_TOKENS, + options.ts_fields or TS_FIELDS, + options.ts_upload_period or TS_UPLOAD_PERIOD, + options.ts_timeout or TS_TIMEOUT)) + if options.pachube_out: + procs.append(PachubeProcessor + (options.pbe_url or PBE_URL, + options.pbe_token or PBE_TOKEN, + options.pbe_feed or PBE_FEED, + options.pbe_upload_period or PBE_UPLOAD_PERIOD, + options.pbe_timeout or PBE_TIMEOUT)) + if options.oem_out: + procs.append(OpenEnergyMonitorProcessor + (options.oem_url or OEM_URL, + options.oem_token or 
OEM_TOKEN, + options.oem_node or OEM_NODE, + options.oem_upload_period or OEM_UPLOAD_PERIOD, + options.oem_timeout or OEM_TIMEOUT)) + if options.wattvision_out: + procs.append(WattvisionProcessor + (options.wv_url or WV_URL, + options.wv_api_id or WV_API_ID, + options.wv_api_key or WV_API_KEY, + options.wv_sensor_id or WV_SENSOR_ID, + options.wv_channel or WV_CHANNEL, + options.wv_upload_period or WV_UPLOAD_PERIOD, + options.wv_timeout or WV_TIMEOUT)) + if options.pvo_out: + procs.append(PVOutputProcessor + (options.pvo_url or PVO_URL, + options.pvo_api_key or PVO_API_KEY, + options.pvo_system_id or PVO_SYSTEM_ID, + options.pvo_generation_channel or PVO_GEN_CHANNEL, + options.pvo_consumption_channel or PVO_CON_CHANNEL, + options.pvo_temperature_channel or PVO_TEMP_CHANNEL, + options.pvo_upload_period or PVO_UPLOAD_PERIOD, + options.pvo_timeout or PVO_TIMEOUT)) + if options.mqtt_out: + procs.append(MQTTProcessor + (options.mqtt_host or MQTT_HOST, + options.mqtt_port or MQTT_PORT, + options.mqtt_clientid or MQTT_CLIENTID, + options.mqtt_base_topic or MQTT_BASE_TOPIC, + options.mqtt_qos or MQTT_QOS, + options.mqtt_retain or MQTT_RETAIN, + options.mqtt_will, + options.mqtt_user, + options.mqtt_passwd, + options.mqtt_tls, + options.mqtt_map or MQTT_MAP, + options.mqtt_upload_period or MQTT_UPLOAD_PERIOD)) + if options.influxdb_out: + if not InfluxDBClient: + print 'InfluxDBClient not loaded, cannot write to InfluxDB' + sys.exit(1) + procs.append(InfluxDBProcessor + (options.influxdb_host or INFLUXDB_HOST, + options.influxdb_port or INFLUXDB_PORT, + options.influxdb_username or INFLUXDB_USERNAME, + options.influxdb_password or INFLUXDB_PASSWORD, + options.influxdb_database or INFLUXDB_DATABASE, + options.influxdb_mode or INFLUXDB_MODE, + options.influxdb_measurement or INFLUXDB_MEASUREMENT, + options.influxdb_map or INFLUXDB_MAP, + options.influxdb_tags or INFLUXDB_TAG_MAP, + options.influxdb_upload_period or INFLUXDB_UPLOAD_PERIOD, + options.influxdb_timeout or INFLUXDB_TIMEOUT, + options.influxdb_db_schema or INFLUXDB_DB_SCHEMA)) + + mon = Monitor(col, procs) + mon.run() + + sys.exit(0) diff --git a/build b/build new file mode 100644 index 0000000..50ec988 --- /dev/null +++ b/build @@ -0,0 +1,4 @@ +#!/bin/bash + +SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )" +cd "${SCRIPT_DIR}" && docker build --tag="psarossy/btmon" .
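+
+# example invocation of the resulting image (illustrative only; the wrapper
+# script exits unless BTMON_CONFIG points at a config file that exists in
+# the container, and the sample btmon.cfg listens on tcp port 5000):
+#   docker run -d -p 5000:5000 \
+#     -e BTMON_CONFIG=/opt/btmon/btmon.cfg psarossy/btmon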