Copy in apsystems_ecur project from Github
https://github.com/ksheumaker/homeassistant-apsystems_ecur.git
This commit is contained in:
parent
9e1f55f7dc
commit
40eba650da
413
server/apsystems_ecur/APSystemsECUR.py
Executable file
413
server/apsystems_ecur/APSystemsECUR.py
Executable file
|
@ -0,0 +1,413 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import asyncio
|
||||
import socket
|
||||
import binascii
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
from pprint import pprint
|
||||
|
||||
class APSystemsInvalidData(Exception):
    # Raised whenever a response from the ECU cannot be parsed or fails
    # validation (bad checksum, wrong framing, undecodable binary fields).
    pass
|
||||
|
||||
class APSystemsECUR:
|
||||
|
||||
def __init__(self, ipaddr, port=8899, raw_ecu=None, raw_inverter=None):
|
||||
self.ipaddr = ipaddr
|
||||
self.port = port
|
||||
|
||||
# what do we expect socket data to end in
|
||||
self.recv_suffix = b'END\n'
|
||||
|
||||
# how long to wait on socket commands until we get our recv_suffix
|
||||
self.timeout = 5
|
||||
|
||||
# how many times do we try the same command in a single update before failing
|
||||
self.cmd_attempts = 3
|
||||
|
||||
# how big of a buffer to read at a time from the socket
|
||||
self.recv_size = 4096
|
||||
|
||||
self.qs1_ids = [ "802", "801" ]
|
||||
self.yc600_ids = [ "406", "407", "408", "409" ]
|
||||
self.yc1000_ids = [ "501", "502", "503", "504" ]
|
||||
|
||||
self.cmd_suffix = "END\n"
|
||||
self.ecu_query = "APS1100160001" + self.cmd_suffix
|
||||
self.inverter_query_prefix = "APS1100280002"
|
||||
self.inverter_query_suffix = self.cmd_suffix
|
||||
|
||||
self.inverter_signal_prefix = "APS1100280030"
|
||||
self.inverter_signal_suffix = self.cmd_suffix
|
||||
|
||||
self.inverter_byte_start = 26
|
||||
|
||||
self.ecu_id = None
|
||||
self.qty_of_inverters = 0
|
||||
self.lifetime_energy = 0
|
||||
self.current_power = 0
|
||||
self.today_energy = 0
|
||||
self.inverters = []
|
||||
self.firmware = None
|
||||
self.timezone = None
|
||||
self.last_update = None
|
||||
|
||||
self.ecu_raw_data = raw_ecu
|
||||
self.inverter_raw_data = raw_inverter
|
||||
self.inverter_raw_signal = None
|
||||
|
||||
self.read_buffer = b''
|
||||
|
||||
self.reader = None
|
||||
self.writer = None
|
||||
|
||||
|
||||
def dump(self):
|
||||
print(f"ECU : {self.ecu_id}")
|
||||
print(f"Firmware : {self.firmware}")
|
||||
print(f"TZ : {self.timezone}")
|
||||
print(f"Qty of inverters : {self.qty_of_inverters}")
|
||||
|
||||
async def async_read_from_socket(self):
|
||||
self.read_buffer = b''
|
||||
end_data = None
|
||||
|
||||
while end_data != self.recv_suffix:
|
||||
self.read_buffer += await self.reader.read(self.recv_size)
|
||||
size = len(self.read_buffer)
|
||||
end_data = self.read_buffer[size-4:]
|
||||
|
||||
return self.read_buffer
|
||||
|
||||
async def async_send_read_from_socket(self, cmd):
|
||||
current_attempt = 0
|
||||
while current_attempt < self.cmd_attempts:
|
||||
current_attempt += 1
|
||||
|
||||
self.writer.write(cmd.encode('utf-8'))
|
||||
await self.writer.drain()
|
||||
|
||||
try:
|
||||
return await asyncio.wait_for(self.async_read_from_socket(),
|
||||
timeout=self.timeout)
|
||||
except Exception as err:
|
||||
pass
|
||||
|
||||
self.writer.close()
|
||||
raise APSystemsInvalidData(f"Incomplete data from ECU after {current_attempt} attempts, cmd='{cmd.rstrip()}' data={self.read_buffer}")
|
||||
|
||||
async def async_query_ecu(self):
|
||||
self.reader, self.writer = await asyncio.open_connection(self.ipaddr, self.port)
|
||||
_LOGGER.info(f"Connected to {self.ipaddr} {self.port}")
|
||||
|
||||
cmd = self.ecu_query
|
||||
self.ecu_raw_data = await self.async_send_read_from_socket(cmd)
|
||||
|
||||
self.process_ecu_data()
|
||||
|
||||
cmd = self.inverter_query_prefix + self.ecu_id + self.inverter_query_suffix
|
||||
self.inverter_raw_data = await self.async_send_read_from_socket(cmd)
|
||||
|
||||
cmd = self.inverter_signal_prefix + self.ecu_id + self.inverter_signal_suffix
|
||||
self.inverter_raw_signal = await self.async_send_read_from_socket(cmd)
|
||||
|
||||
self.writer.close()
|
||||
|
||||
data = self.process_inverter_data()
|
||||
data["ecu_id"] = self.ecu_id
|
||||
data["today_energy"] = self.today_energy
|
||||
data["lifetime_energy"] = self.lifetime_energy
|
||||
data["current_power"] = self.current_power
|
||||
|
||||
return(data)
|
||||
|
||||
def query_ecu(self):
|
||||
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sock.connect((self.ipaddr,self.port))
|
||||
|
||||
sock.sendall(self.ecu_query.encode('utf-8'))
|
||||
self.ecu_raw_data = sock.recv(self.recv_size)
|
||||
|
||||
self.process_ecu_data()
|
||||
|
||||
cmd = self.inverter_query_prefix + self.ecu_id + self.inverter_query_suffix
|
||||
sock.sendall(cmd.encode('utf-8'))
|
||||
self.inverter_raw_data = sock.recv(self.recv_size)
|
||||
|
||||
cmd = self.inverter_signal_prefix + self.ecu_id + self.inverter_signal_suffix
|
||||
sock.sendall(cmd.encode('utf-8'))
|
||||
self.inverter_raw_signal = sock.recv(self.recv_size)
|
||||
|
||||
sock.shutdown(socket.SHUT_RDWR)
|
||||
sock.close()
|
||||
|
||||
data = self.process_inverter_data()
|
||||
|
||||
data["ecu_id"] = self.ecu_id
|
||||
data["today_energy"] = self.today_energy
|
||||
data["lifetime_energy"] = self.lifetime_energy
|
||||
data["current_power"] = self.current_power
|
||||
|
||||
|
||||
return(data)
|
||||
|
||||
def aps_int(self, codec, start):
|
||||
try:
|
||||
return int(binascii.b2a_hex(codec[(start):(start+2)]), 16)
|
||||
except ValueError as err:
|
||||
debugdata = binascii.b2a_hex(codec)
|
||||
raise APSystemsInvalidData(f"Unable to convert binary to int location={start} data={debugdata}")
|
||||
|
||||
def aps_short(self, codec, start):
|
||||
try:
|
||||
return int(binascii.b2a_hex(codec[(start):(start+1)]), 8)
|
||||
except ValueError as err:
|
||||
debugdata = binascii.b2a_hex(codec)
|
||||
raise APSystemsInvalidData(f"Unable to convert binary to short int location={start} data={debugdata}")
|
||||
|
||||
def aps_double(self, codec, start):
|
||||
try:
|
||||
return int (binascii.b2a_hex(codec[(start):(start+4)]), 16)
|
||||
except ValueError as err:
|
||||
debugdata = binascii.b2a_hex(codec)
|
||||
raise APSystemsInvalidData(f"Unable to convert binary to double location={start} data={debugdata}")
|
||||
|
||||
def aps_bool(self, codec, start):
|
||||
return bool(binascii.b2a_hex(codec[(start):(start+2)]))
|
||||
|
||||
def aps_uid(self, codec, start):
|
||||
return str(binascii.b2a_hex(codec[(start):(start+12)]))[2:14]
|
||||
|
||||
def aps_str(self, codec, start, amount):
|
||||
return str(codec[start:(start+amount)])[2:(amount+2)]
|
||||
|
||||
def aps_timestamp(self, codec, start, amount):
|
||||
timestr=str(binascii.b2a_hex(codec[start:(start+amount)]))[2:(amount+2)]
|
||||
return timestr[0:4]+"-"+timestr[4:6]+"-"+timestr[6:8]+" "+timestr[8:10]+":"+timestr[10:12]+":"+timestr[12:14]
|
||||
|
||||
def check_ecu_checksum(self, data, cmd):
|
||||
datalen = len(data) - 1
|
||||
try:
|
||||
checksum = int(data[5:9])
|
||||
except ValueError as err:
|
||||
debugdata = binascii.b2a_hex(data)
|
||||
raise APSystemsInvalidData(f"Error getting checksum int from '{cmd}' data={debugdata}")
|
||||
|
||||
if datalen != checksum:
|
||||
debugdata = binascii.b2a_hex(data)
|
||||
raise APSystemsInvalidData(f"Checksum on '{cmd}' failed checksum={checksum} datalen={datalen} data={debugdata}")
|
||||
|
||||
start_str = self.aps_str(data, 0, 3)
|
||||
end_str = self.aps_str(data, len(data) - 4, 3)
|
||||
|
||||
if start_str != 'APS':
|
||||
debugdata = binascii.b2a_hex(data)
|
||||
raise APSystemsInvalidData(f"Result on '{cmd}' incorrect start signature '{start_str}' != APS data={debugdata}")
|
||||
|
||||
if end_str != 'END':
|
||||
debugdata = binascii.b2a_hex(data)
|
||||
raise APSystemsInvalidData(f"Result on '{cmd}' incorrect end signature '{end_str}' != END data={debugdata}")
|
||||
|
||||
return True
|
||||
|
||||
def process_ecu_data(self, data=None):
|
||||
if not data:
|
||||
data = self.ecu_raw_data
|
||||
|
||||
self.check_ecu_checksum(data, "ECU Query")
|
||||
|
||||
self.ecu_id = self.aps_str(data, 13, 12)
|
||||
self.qty_of_inverters = self.aps_int(data, 46)
|
||||
self.firmware = self.aps_str(data, 55, 15)
|
||||
self.timezone = self.aps_str(data, 70, 9)
|
||||
self.lifetime_energy = self.aps_double(data, 27) / 10
|
||||
self.today_energy = self.aps_double(data, 35) / 100
|
||||
self.current_power = self.aps_double(data, 31)
|
||||
|
||||
|
||||
def process_signal_data(self, data=None):
|
||||
signal_data = {}
|
||||
|
||||
if not data:
|
||||
data = self.inverter_raw_signal
|
||||
|
||||
self.check_ecu_checksum(data, "Signal Query")
|
||||
|
||||
if not self.qty_of_inverters:
|
||||
return signal_data
|
||||
|
||||
location = 15
|
||||
for i in range(0, self.qty_of_inverters):
|
||||
uid = self.aps_uid(data, location)
|
||||
location += 6
|
||||
|
||||
strength = data[location]
|
||||
location += 1
|
||||
|
||||
strength = int((strength / 255) * 100)
|
||||
signal_data[uid] = strength
|
||||
|
||||
return signal_data
|
||||
|
||||
def process_inverter_data(self, data=None):
|
||||
if not data:
|
||||
data = self.inverter_raw_data
|
||||
|
||||
self.check_ecu_checksum(data, "Inverter data")
|
||||
|
||||
output = {}
|
||||
|
||||
timestamp = self.aps_timestamp(data, 19, 14)
|
||||
inverter_qty = self.aps_int(data, 17)
|
||||
|
||||
self.last_update = timestamp
|
||||
output["timestamp"] = timestamp
|
||||
output["inverter_qty"] = inverter_qty
|
||||
output["inverters"] = {}
|
||||
|
||||
# this is the start of the loop of inverters
|
||||
location = self.inverter_byte_start
|
||||
|
||||
signal = self.process_signal_data()
|
||||
|
||||
inverters = {}
|
||||
for i in range(0, inverter_qty):
|
||||
|
||||
inv={}
|
||||
|
||||
inverter_uid = self.aps_uid(data, location)
|
||||
inv["uid"] = inverter_uid
|
||||
location += 6
|
||||
|
||||
inv["online"] = self.aps_bool(data, location)
|
||||
location += 1
|
||||
|
||||
inv["unknown"] = self.aps_str(data, location, 2)
|
||||
location += 2
|
||||
|
||||
inv["frequency"] = self.aps_int(data, location) / 10
|
||||
location += 2
|
||||
|
||||
inv["temperature"] = self.aps_int(data, location) - 100
|
||||
location += 2
|
||||
|
||||
inv["signal"] = signal.get(inverter_uid, 0)
|
||||
|
||||
# the first 3 digits determine the type of inverter
|
||||
inverter_type = inverter_uid[0:3]
|
||||
if inverter_type in self.yc600_ids:
|
||||
(channel_data, location) = self.process_yc600(data, location)
|
||||
inv.update(channel_data)
|
||||
|
||||
elif inverter_type in self.qs1_ids:
|
||||
(channel_data, location) = self.process_qs1(data, location)
|
||||
inv.update(channel_data)
|
||||
|
||||
elif inverter_type in self.yc1000_ids:
|
||||
(channel_data, location) = self.process_yc1000(data, location)
|
||||
inv.update(channel_data)
|
||||
|
||||
else:
|
||||
raise APSystemsInvalidData(f"Unsupported inverter type {inverter_type}")
|
||||
|
||||
inverters[inverter_uid] = inv
|
||||
|
||||
output["inverters"] = inverters
|
||||
return (output)
|
||||
|
||||
def process_yc1000(self, data, location):
|
||||
|
||||
power = []
|
||||
voltages = []
|
||||
|
||||
power.append(self.aps_int(data, location))
|
||||
location += 2
|
||||
|
||||
voltage = self.aps_int(data, location)
|
||||
location += 2
|
||||
|
||||
power.append(self.aps_int(data, location))
|
||||
location += 2
|
||||
|
||||
voltage = self.aps_int(data, location)
|
||||
location += 2
|
||||
|
||||
power.append(self.aps_int(data, location))
|
||||
location += 2
|
||||
|
||||
voltage = self.aps_int(data, location)
|
||||
location += 2
|
||||
|
||||
power.append(self.aps_int(data, location))
|
||||
location += 2
|
||||
|
||||
voltages.append(voltage)
|
||||
|
||||
output = {
|
||||
"model" : "YC1000",
|
||||
"channel_qty" : 4,
|
||||
"power" : power,
|
||||
"voltage" : voltages
|
||||
}
|
||||
|
||||
return (output, location)
|
||||
|
||||
|
||||
def process_qs1(self, data, location):
|
||||
|
||||
power = []
|
||||
voltages = []
|
||||
|
||||
power.append(self.aps_int(data, location))
|
||||
location += 2
|
||||
|
||||
voltage = self.aps_int(data, location)
|
||||
location += 2
|
||||
|
||||
power.append(self.aps_int(data, location))
|
||||
location += 2
|
||||
|
||||
power.append(self.aps_int(data, location))
|
||||
location += 2
|
||||
|
||||
power.append(self.aps_int(data, location))
|
||||
location += 2
|
||||
|
||||
voltages.append(voltage)
|
||||
|
||||
output = {
|
||||
"model" : "QS1",
|
||||
"channel_qty" : 4,
|
||||
"power" : power,
|
||||
"voltage" : voltages
|
||||
}
|
||||
|
||||
return (output, location)
|
||||
|
||||
|
||||
def process_yc600(self, data, location):
|
||||
power = []
|
||||
voltages = []
|
||||
|
||||
for i in range(0, 2):
|
||||
power.append(self.aps_int(data, location))
|
||||
location += 2
|
||||
|
||||
voltages.append(self.aps_int(data, location))
|
||||
location += 2
|
||||
|
||||
output = {
|
||||
"model" : "YC600",
|
||||
"channel_qty" : 2,
|
||||
"power" : power,
|
||||
"voltage" : voltages,
|
||||
}
|
||||
|
||||
return (output, location)
|
||||
|
||||
|
154
server/apsystems_ecur/__init__.py
Executable file
154
server/apsystems_ecur/__init__.py
Executable file
|
@ -0,0 +1,154 @@
|
|||
import logging
|
||||
|
||||
import voluptuous as vol
|
||||
import traceback
|
||||
from datetime import timedelta
|
||||
|
||||
from .APSystemsECUR import APSystemsECUR, APSystemsInvalidData
|
||||
from homeassistant.helpers.discovery import load_platform
|
||||
import homeassistant.helpers.config_validation as cv
|
||||
from homeassistant.const import CONF_HOST
|
||||
from homeassistant.helpers.entity import Entity
|
||||
from homeassistant.util import Throttle
|
||||
|
||||
from homeassistant.helpers.update_coordinator import (
|
||||
CoordinatorEntity,
|
||||
DataUpdateCoordinator,
|
||||
UpdateFailed,
|
||||
)
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
from .const import DOMAIN
|
||||
|
||||
CONF_INTERVAL = "interval"
|
||||
|
||||
CONFIG_SCHEMA = vol.Schema({
|
||||
DOMAIN : vol.Schema({
|
||||
vol.Required(CONF_HOST): cv.string,
|
||||
vol.Optional(CONF_INTERVAL) : cv.time_period_seconds
|
||||
})
|
||||
}, extra=vol.ALLOW_EXTRA)
|
||||
|
||||
PLATFORMS = [ "sensor", "binary_sensor" ]
|
||||
|
||||
|
||||
## handle all the communications with the ECUR class and deal with our need for caching, etc
|
||||
## handle all the communications with the ECUR class and deal with our need for caching, etc
class ECUR():
    """Caching wrapper around APSystemsECUR.

    Serves the last good reading when the ECU cannot be reached and lets
    polling be paused (e.g. overnight).  After cache_max consecutive
    cache hits an exception is raised so the failure becomes visible.
    """

    def __init__(self, ipaddr):
        self.ecu = APSystemsECUR(ipaddr)
        self.cache_count = 0        # consecutive updates served from cache
        self.cache_max = 5          # give up after this many cache hits
        self.data_from_cache = False
        self.querying = True
        # BUG FIX: was misspelled "error_messge" here while the update
        # handlers wrote "error_message"/"error_msg" -- unified name
        self.error_message = ""
        self.cached_data = {}

    async def stop_query(self):
        """Pause polling; update() will keep serving cached data."""
        self.querying = False

    async def start_query(self):
        """Resume polling the ECU."""
        self.querying = True

    def _use_cached_data(self, err):
        """Record a failed poll and fall back to the cached reading.

        Raises once the cache has been used more than cache_max times in
        a row.  Returns the cached data dict otherwise.
        """
        msg = f"Using cached data from last successful communication from ECU. Error: {err}"
        _LOGGER.warning(msg)

        self.error_message = msg
        self.cache_count += 1
        self.data_from_cache = True

        if self.cache_count > self.cache_max:
            raise Exception(f"Error using cached data for more than {self.cache_max} times.")

        return self.cached_data

    async def update(self):
        """Return the latest ECU reading, falling back to cache on errors."""
        data = {}

        # if we aren't actively querying data, pull data from the cache
        # this is so we can stop querying after sunset
        if not self.querying:

            _LOGGER.debug("Not querying ECU due to stopped")
            data = self.cached_data
            self.data_from_cache = True

            data["data_from_cache"] = self.data_from_cache
            data["querying"] = self.querying
            return self.cached_data

        _LOGGER.debug("Querying ECU")
        try:
            data = await self.ecu.async_query_ecu()
            _LOGGER.debug("Got data from ECU")

            # we got good results, so we store it and set flags about our
            # cache state
            self.cached_data = data
            self.cache_count = 0
            self.data_from_cache = False
            self.error_message = ""

        except APSystemsInvalidData as err:
            # protocol-level failure: fall back to cache
            data = self._use_cached_data(err)

        except Exception as err:
            # any other failure (socket errors, timeouts): same fallback
            data = self._use_cached_data(err)

        data["data_from_cache"] = self.data_from_cache
        data["querying"] = self.querying
        _LOGGER.debug(f"Returning {data}")
        return data
|
||||
|
||||
async def async_setup(hass, config):
    """ Setup the APsystems platform """
    hass.data.setdefault(DOMAIN, {})

    host = config[DOMAIN].get(CONF_HOST)
    interval = config[DOMAIN].get(CONF_INTERVAL)
    if not interval:
        # default poll cadence when the user did not configure one
        interval = timedelta(seconds=60)

    ecu = ECUR(host)

    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=DOMAIN,
        update_method=ecu.update,
        update_interval=interval,
    )

    # prime the coordinator so platforms have data at setup time
    await coordinator.async_refresh()

    hass.data[DOMAIN] = {
        "ecu" : ecu,
        "coordinator" : coordinator
    }

    async def handle_stop_query(call):
        """Service handler: pause ECU polling (e.g. at sunset)."""
        await ecu.stop_query()
        # BUG FIX: async_refresh is a coroutine and was called without
        # await, so the refresh never actually ran
        await coordinator.async_refresh()

    async def handle_start_query(call):
        """Service handler: resume ECU polling (e.g. at sunrise)."""
        await ecu.start_query()
        await coordinator.async_refresh()

    hass.services.async_register(DOMAIN, "start_query", handle_start_query)
    hass.services.async_register(DOMAIN, "stop_query", handle_stop_query)

    for component in PLATFORMS:
        load_platform(hass, component, DOMAIN, {}, config)

    return True
|
86
server/apsystems_ecur/binary_sensor.py
Executable file
86
server/apsystems_ecur/binary_sensor.py
Executable file
|
@ -0,0 +1,86 @@
|
|||
"""Example integration using DataUpdateCoordinator."""
|
||||
|
||||
from datetime import timedelta
|
||||
import logging
|
||||
|
||||
import async_timeout
|
||||
|
||||
from homeassistant.components.binary_sensor import (
|
||||
BinarySensorEntity,
|
||||
)
|
||||
|
||||
from homeassistant.helpers.update_coordinator import (
|
||||
CoordinatorEntity,
|
||||
DataUpdateCoordinator,
|
||||
UpdateFailed,
|
||||
)
|
||||
|
||||
from .const import (
|
||||
DOMAIN,
|
||||
RELOAD_ICON,
|
||||
CACHE_ICON
|
||||
)
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
    """Create the two ECU status binary sensors from discovery data."""
    domain_data = hass.data[DOMAIN]
    ecu = domain_data.get("ecu")
    coordinator = domain_data.get("coordinator")

    add_entities([
        APSystemsECUBinarySensor(coordinator, ecu, "data_from_cache",
                                 label="Using Cached Data", icon=CACHE_ICON),
        APSystemsECUBinarySensor(coordinator, ecu, "querying",
                                 label="Querying Enabled", icon=RELOAD_ICON),
    ])
|
||||
|
||||
|
||||
class APSystemsECUBinarySensor(CoordinatorEntity, BinarySensorEntity):
    """Boolean flag of the ECU wrapper ("data_from_cache" / "querying")."""

    def __init__(self, coordinator, ecu, field, label=None, devclass=None, icon=None):
        # devclass is accepted for signature parity with the sensors but
        # is not used by this entity
        super().__init__(coordinator)

        self.coordinator = coordinator
        self._ecu = ecu
        self._field = field
        self._label = label if label else field
        self._icon = icon

        self._name = f"ECU {self._label}"
        self._state = None

    @property
    def unique_id(self):
        return f"{self._ecu.ecu.ecu_id}_{self._field}"

    @property
    def name(self):
        return self._name

    @property
    def is_on(self):
        # the coordinator payload carries the flag under the field name
        return self.coordinator.data.get(self._field)

    @property
    def icon(self):
        return self._icon

    @property
    def device_state_attributes(self):
        return {
            "ecu_id" : self._ecu.ecu.ecu_id,
            "firmware" : self._ecu.ecu.firmware,
            "last_update" : self._ecu.ecu.last_update
        }
|
||||
|
||||
|
7
server/apsystems_ecur/const.py
Executable file
7
server/apsystems_ecur/const.py
Executable file
|
@ -0,0 +1,7 @@
|
|||
# Integration domain plus shared Material Design icon identifiers used
# by the sensor and binary_sensor platforms.
DOMAIN = 'apsystems_ecur'
SOLAR_ICON = "mdi:solar-power"
FREQ_ICON = "mdi:sine-wave"
SIGNAL_ICON = "mdi:signal"
RELOAD_ICON = "mdi:reload"
CACHE_ICON = "mdi:cached"
|
||||
|
10
server/apsystems_ecur/manifest.json
Executable file
10
server/apsystems_ecur/manifest.json
Executable file
|
@ -0,0 +1,10 @@
|
|||
{
|
||||
"codeowners": ["@ksheumaker"],
|
||||
"config_flow": false,
|
||||
"dependencies": [],
|
||||
"documentation": "https://github.com/ksheumaker/homeassistant-apsystems_ecur",
|
||||
"domain": "apsystems_ecur",
|
||||
"name": "APSystems PV solar ECU-R",
|
||||
"version": "1.0.1",
|
||||
"requirements": []
|
||||
}
|
208
server/apsystems_ecur/sensor.py
Executable file
208
server/apsystems_ecur/sensor.py
Executable file
|
@ -0,0 +1,208 @@
|
|||
"""Example integration using DataUpdateCoordinator."""
|
||||
|
||||
from datetime import timedelta
|
||||
import logging
|
||||
|
||||
import async_timeout
|
||||
|
||||
from homeassistant.helpers.entity import Entity
|
||||
from homeassistant.helpers.update_coordinator import (
|
||||
CoordinatorEntity,
|
||||
DataUpdateCoordinator,
|
||||
UpdateFailed,
|
||||
)
|
||||
|
||||
from .const import (
|
||||
DOMAIN,
|
||||
SOLAR_ICON,
|
||||
FREQ_ICON,
|
||||
SIGNAL_ICON
|
||||
)
|
||||
|
||||
from homeassistant.const import (
|
||||
DEVICE_CLASS_TEMPERATURE,
|
||||
DEVICE_CLASS_POWER,
|
||||
DEVICE_CLASS_ENERGY,
|
||||
DEVICE_CLASS_VOLTAGE,
|
||||
DEVICE_CLASS_ENERGY,
|
||||
ENERGY_KILO_WATT_HOUR,
|
||||
POWER_WATT,
|
||||
VOLT,
|
||||
TEMP_CELSIUS,
|
||||
PERCENTAGE,
|
||||
FREQUENCY_HERTZ
|
||||
)
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
    """Create ECU-level totals plus per-inverter sensors from coordinator data."""
    ecu = hass.data[DOMAIN].get("ecu")
    coordinator = hass.data[DOMAIN].get("coordinator")

    # ECU-wide totals
    sensors = [
        APSystemsECUSensor(coordinator, ecu, "current_power",
            label="Current Power", unit=POWER_WATT,
            devclass=DEVICE_CLASS_POWER, icon=SOLAR_ICON),
        APSystemsECUSensor(coordinator, ecu, "today_energy",
            label="Today Energy", unit=ENERGY_KILO_WATT_HOUR,
            devclass=DEVICE_CLASS_ENERGY, icon=SOLAR_ICON),
        APSystemsECUSensor(coordinator, ecu, "lifetime_energy",
            label="Lifetime Energy", unit=ENERGY_KILO_WATT_HOUR,
            devclass=DEVICE_CLASS_ENERGY, icon=SOLAR_ICON),
    ]

    # one sensor per (inverter, field), driven by this spec table:
    # (field, label, unit, device class, icon)
    per_inverter_specs = [
        ("temperature", "Temperature", TEMP_CELSIUS, DEVICE_CLASS_TEMPERATURE, None),
        ("frequency", "Frequency", FREQUENCY_HERTZ, None, FREQ_ICON),
        ("voltage", "Voltage", VOLT, DEVICE_CLASS_VOLTAGE, None),
        ("signal", "Signal", PERCENTAGE, None, SIGNAL_ICON),
    ]

    for uid, inv_data in coordinator.data.get("inverters", {}).items():
        for field, label, unit, devclass, icon in per_inverter_specs:
            sensors.append(
                APSystemsECUInverterSensor(coordinator, ecu, uid, field,
                    label=label, unit=unit, devclass=devclass, icon=icon)
            )
        # plus one power sensor per DC channel
        for channel in range(inv_data.get("channel_qty", 0)):
            sensors.append(
                APSystemsECUInverterSensor(coordinator, ecu, uid, "power",
                    index=channel, label=f"Power Ch {channel+1}", unit=POWER_WATT,
                    devclass=DEVICE_CLASS_POWER, icon=SOLAR_ICON)
            )

    add_entities(sensors)
|
||||
|
||||
|
||||
class APSystemsECUInverterSensor(CoordinatorEntity, Entity):
    """Sensor for one field (or one power channel) of a single inverter."""

    def __init__(self, coordinator, ecu, uid, field, index=0, label=None, icon=None, unit=None, devclass=None):
        super().__init__(coordinator)

        self.coordinator = coordinator

        self._index = index     # channel number for per-channel fields like "power"
        self._uid = uid         # inverter UID the coordinator data is keyed by
        self._ecu = ecu
        self._field = field
        self._devclass = devclass
        self._label = label
        if not label:
            self._label = field
        self._icon = icon
        self._unit = unit

        self._name = f"Inverter {self._uid} {self._label}"
        self._state = None

    @property
    def unique_id(self):
        field = self._field
        # index defaults to 0, so even single-value fields get a suffix --
        # kept that way for backward-compatible entity IDs
        # (fixed: was "!= None")
        if self._index is not None:
            field = f"{field}_{self._index}"
        return f"{self._ecu.ecu.ecu_id}_{self._uid}_{field}"

    @property
    def device_class(self):
        return self._devclass

    @property
    def name(self):
        return self._name

    @property
    def state(self):
        inverter = self.coordinator.data.get("inverters", {}).get(self._uid, {})
        if self._field == "voltage":
            # BUG FIX: previously indexed [0] on the default empty list,
            # raising IndexError when the inverter had no voltage data
            values = inverter.get("voltage", [])
            return values[0] if values else None
        elif self._field == "power":
            # BUG FIX: guard the channel index the same way
            values = inverter.get("power", [])
            return values[self._index] if self._index < len(values) else None
        else:
            return inverter.get(self._field)

    @property
    def icon(self):
        return self._icon

    @property
    def unit_of_measurement(self):
        return self._unit

    @property
    def device_state_attributes(self):
        """Extra attributes tying the sensor back to its ECU."""
        attrs = {
            "ecu_id" : self._ecu.ecu.ecu_id,
            "last_update" : self._ecu.ecu.last_update,
        }
        return attrs
|
||||
|
||||
|
||||
|
||||
class APSystemsECUSensor(CoordinatorEntity, Entity):
    """Sensor exposing one ECU-level field from the coordinator payload."""

    def __init__(self, coordinator, ecu, field, label=None, icon=None, unit=None, devclass=None):
        super().__init__(coordinator)

        self.coordinator = coordinator
        self._ecu = ecu
        self._field = field
        self._label = label if label else field
        self._icon = icon
        self._unit = unit
        self._devclass = devclass

        self._name = f"ECU {self._label}"
        self._state = None

    @property
    def unique_id(self):
        return f"{self._ecu.ecu.ecu_id}_{self._field}"

    @property
    def name(self):
        return self._name

    @property
    def device_class(self):
        return self._devclass

    @property
    def state(self):
        # the coordinator payload carries the value under the field name
        return self.coordinator.data.get(self._field)

    @property
    def icon(self):
        return self._icon

    @property
    def unit_of_measurement(self):
        return self._unit

    @property
    def device_state_attributes(self):
        return {
            "ecu_id" : self._ecu.ecu.ecu_id,
            "firmware" : self._ecu.ecu.firmware,
            "last_update" : self._ecu.ecu.last_update
        }
|
||||
|
||||
|
||||
|
5
server/apsystems_ecur/services.yaml
Normal file
5
server/apsystems_ecur/services.yaml
Normal file
|
@ -0,0 +1,5 @@
|
|||
start_query:
|
||||
description: Start querying the ECU for new data (e.g. at sunrise)
|
||||
|
||||
stop_query:
|
||||
description: Stop querying the ECU for new data (e.g. at sunset)
|
25
server/apsystems_ecur/test.py
Executable file
25
server/apsystems_ecur/test.py
Executable file
|
@ -0,0 +1,25 @@
|
|||
#!/usr/bin/env python3
"""Stand-alone smoke test: poll the ECU-R once a minute and pretty-print
each result.  Runs forever; interrupt with Ctrl-C."""

from APSystemsECUR import APSystemsECUR
import time
import asyncio
from pprint import pprint

# IP address of the ECU-R on the local network -- adjust before running
ecu_ip = "192.168.0.251"
# seconds to wait between polls
sleep = 60

loop = asyncio.get_event_loop()
ecu = APSystemsECUR(ecu_ip)

while True:
    try:
        data = loop.run_until_complete(ecu.async_query_ecu())
        print(f"[OK] Timestamp: {data.get('timestamp')} Current Power: {data.get('current_power')}")
        pprint(data)
    except Exception as err:
        # best-effort loop: report the failure and try again next cycle
        print(f"[ERROR] {err}")

    print(f"Sleeping for {sleep} sec")
    time.sleep(sleep)
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user