parent
40eba650da
commit
c52268be2c
10 changed files with 236 additions and 622 deletions
@ -1,154 +0,0 @@ |
||||
import logging |
||||
|
||||
import voluptuous as vol |
||||
import traceback |
||||
from datetime import timedelta |
||||
|
||||
from .APSystemsECUR import APSystemsECUR, APSystemsInvalidData |
||||
from homeassistant.helpers.discovery import load_platform |
||||
import homeassistant.helpers.config_validation as cv |
||||
from homeassistant.const import CONF_HOST |
||||
from homeassistant.helpers.entity import Entity |
||||
from homeassistant.util import Throttle |
||||
|
||||
from homeassistant.helpers.update_coordinator import ( |
||||
CoordinatorEntity, |
||||
DataUpdateCoordinator, |
||||
UpdateFailed, |
||||
) |
||||
|
||||
_LOGGER = logging.getLogger(__name__) |
||||
|
||||
from .const import DOMAIN |
||||
|
||||
# Configuration key for the polling interval (parsed as seconds).
CONF_INTERVAL = "interval"

# YAML configuration schema: requires the ECU host, optionally a poll
# interval; ALLOW_EXTRA so unrelated top-level config keys pass through.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN : vol.Schema({
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_INTERVAL) : cv.time_period_seconds
    })
}, extra=vol.ALLOW_EXTRA)

# Entity platforms loaded by async_setup for this integration.
PLATFORMS = [ "sensor", "binary_sensor" ]
||||
|
||||
|
||||
## handle all the communications with the ECUR class and deal with our need for caching, etc
class ECUR():
    """Wrap APSystemsECUR with result caching and query on/off control.

    The ECU stops answering after sunset, so callers can pause polling
    (stop_query) and this wrapper keeps serving the last good data from
    its cache.  Query failures also fall back to the cache; after more
    than ``cache_max`` consecutive cached responses, update() raises so
    the coordinator surfaces a persistent outage.
    """

    def __init__(self, ipaddr):
        # Low-level ECU client (project-local).
        self.ecu = APSystemsECUR(ipaddr)
        # Per-instance logger (module __name__), used by update().
        self._log = logging.getLogger(__name__)
        # Consecutive updates served from cache because of errors.
        self.cache_count = 0
        # Max consecutive cached responses before update() raises.
        self.cache_max = 5
        # True when the last returned data came from cache.
        self.data_from_cache = False
        # When False, update() never contacts the ECU.
        self.querying = True
        # Last failure description ("" when healthy).
        # BUGFIX: the original used three spellings for this attribute
        # (error_messge / error_message / error_msg); unified here.
        self.error_message = ""
        # Last successfully retrieved data set.
        self.cached_data = {}

    async def stop_query(self):
        """Stop contacting the ECU; update() will serve cached data."""
        self.querying = False

    async def start_query(self):
        """Resume contacting the ECU on the next update()."""
        self.querying = True

    def _use_cache_after_error(self, err):
        """Record a query failure and return the cached data instead.

        Raises:
            Exception: once the cache has been served more than
                ``cache_max`` consecutive times.
        """
        msg = f"Using cached data from last successful communication from ECU. Error: {err}"
        self._log.warning(msg)

        # we got invalid data, so we need to pull from cache
        self.error_message = msg
        self.cache_count += 1
        self.data_from_cache = True

        if self.cache_count > self.cache_max:
            raise Exception(f"Error using cached data for more than {self.cache_max} times.")

        return self.cached_data

    async def update(self):
        """Return the current ECU data set, falling back to cache.

        Always annotates the returned dict with ``data_from_cache`` and
        ``querying`` so sensors can expose the wrapper's state.
        """
        # if we aren't actively querying data, pull data from the cache
        # this is so we can stop querying after sunset
        if not self.querying:
            self._log.debug("Not querying ECU due to stopped")
            data = self.cached_data
            self.data_from_cache = True

            data["data_from_cache"] = self.data_from_cache
            data["querying"] = self.querying
            return data

        self._log.debug("Querying ECU")
        try:
            data = await self.ecu.async_query_ecu()
            self._log.debug("Got data from ECU")

            # we got good results, so we store it and set flags about our
            # cache state
            self.cached_data = data
            self.cache_count = 0
            self.data_from_cache = False
            self.error_message = ""
        except Exception as err:
            # The original had two byte-identical handlers (invalid data
            # and generic failure); both fall back to the cache.
            data = self._use_cache_after_error(err)

        data["data_from_cache"] = self.data_from_cache
        data["querying"] = self.querying
        self._log.debug(f"Returning {data}")
        return data
||||
|
||||
async def async_setup(hass, config):
    """ Setup the APsystems platform """
    hass.data.setdefault(DOMAIN, {})

    host = config[DOMAIN].get(CONF_HOST)
    # Default to polling every 60 seconds when no interval configured.
    interval = config[DOMAIN].get(CONF_INTERVAL)
    if not interval:
        interval = timedelta(seconds=60)

    ecu = ECUR(host)

    # Single shared coordinator drives both sensor platforms.
    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=DOMAIN,
        update_method=ecu.update,
        update_interval=interval,
    )

    await coordinator.async_refresh()

    hass.data[DOMAIN] = {
        "ecu" : ecu,
        "coordinator" : coordinator
    }

    async def handle_stop_query(call):
        """Service handler: stop polling the ECU (e.g. at sunset)."""
        await ecu.stop_query()
        # BUGFIX: async_refresh is a coroutine and was never awaited,
        # so entities did not pick up the new querying state.
        await coordinator.async_refresh()

    async def handle_start_query(call):
        """Service handler: resume polling the ECU (e.g. at sunrise)."""
        await ecu.start_query()
        await coordinator.async_refresh()

    hass.services.async_register(DOMAIN, "start_query", handle_start_query)
    hass.services.async_register(DOMAIN, "stop_query", handle_stop_query)

    for component in PLATFORMS:
        load_platform(hass, component, DOMAIN, {}, config)

    return True
@ -1,86 +0,0 @@ |
||||
"""Example integration using DataUpdateCoordinator.""" |
||||
|
||||
from datetime import timedelta |
||||
import logging |
||||
|
||||
import async_timeout |
||||
|
||||
from homeassistant.components.binary_sensor import ( |
||||
BinarySensorEntity, |
||||
) |
||||
|
||||
from homeassistant.helpers.update_coordinator import ( |
||||
CoordinatorEntity, |
||||
DataUpdateCoordinator, |
||||
UpdateFailed, |
||||
) |
||||
|
||||
from .const import ( |
||||
DOMAIN, |
||||
RELOAD_ICON, |
||||
CACHE_ICON |
||||
) |
||||
|
||||
_LOGGER = logging.getLogger(__name__) |
||||
|
||||
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
    """Create the ECU status binary sensors from the shared coordinator."""
    ecu = hass.data[DOMAIN].get("ecu")
    coordinator = hass.data[DOMAIN].get("coordinator")

    # One boolean entity per cache/query status flag.
    entities = []
    for field, label, icon in (
        ("data_from_cache", "Using Cached Data", CACHE_ICON),
        ("querying", "Querying Enabled", RELOAD_ICON),
    ):
        entities.append(
            APSystemsECUBinarySensor(coordinator, ecu, field, label=label, icon=icon)
        )

    add_entities(entities)
||||
|
||||
|
||||
class APSystemsECUBinarySensor(CoordinatorEntity, BinarySensorEntity):
    """Boolean ECU status flag exposed as a binary sensor."""

    def __init__(self, coordinator, ecu, field, label=None, devclass=None, icon=None):
        """Track one boolean field from the coordinator's data dict."""
        super().__init__(coordinator)

        self.coordinator = coordinator
        self._ecu = ecu
        self._field = field
        self._label = label if label else field
        self._icon = icon
        self._name = f"ECU {self._label}"
        self._state = None

    @property
    def unique_id(self):
        """Stable id: <ecu serial>_<field>."""
        return f"{self._ecu.ecu.ecu_id}_{self._field}"

    @property
    def name(self):
        return self._name

    @property
    def is_on(self):
        """Current value of the tracked field from the last update."""
        return self.coordinator.data.get(self._field)

    @property
    def icon(self):
        return self._icon

    @property
    def device_state_attributes(self):
        """Expose ECU identity and last-update time as attributes."""
        return {
            "ecu_id": self._ecu.ecu.ecu_id,
            "firmware": self._ecu.ecu.firmware,
            "last_update": self._ecu.ecu.last_update,
        }
||||
|
||||
|
@ -1,7 +0,0 @@ |
||||
# Integration domain used for hass.data keys and service registration.
DOMAIN = 'apsystems_ecur'

# Material Design icon names shared by the sensor platforms.
SOLAR_ICON = "mdi:solar-power"
FREQ_ICON = "mdi:sine-wave"
SIGNAL_ICON = "mdi:signal"
RELOAD_ICON = "mdi:reload"
CACHE_ICON = "mdi:cached"
||||
|
@ -1,10 +0,0 @@ |
||||
{ |
||||
"codeowners": ["@ksheumaker"], |
||||
"config_flow": false, |
||||
"dependencies": [], |
||||
"documentation": "https://github.com/ksheumaker/homeassistant-apsystems_ecur", |
||||
"domain": "apsystems_ecur", |
||||
"name": "APSystems PV solar ECU-R", |
||||
"version": "1.0.1", |
||||
"requirements": [] |
||||
} |
@ -1,208 +0,0 @@ |
||||
"""Example integration using DataUpdateCoordinator.""" |
||||
|
||||
from datetime import timedelta |
||||
import logging |
||||
|
||||
import async_timeout |
||||
|
||||
from homeassistant.helpers.entity import Entity |
||||
from homeassistant.helpers.update_coordinator import ( |
||||
CoordinatorEntity, |
||||
DataUpdateCoordinator, |
||||
UpdateFailed, |
||||
) |
||||
|
||||
from .const import ( |
||||
DOMAIN, |
||||
SOLAR_ICON, |
||||
FREQ_ICON, |
||||
SIGNAL_ICON |
||||
) |
||||
|
||||
from homeassistant.const import ( |
||||
DEVICE_CLASS_TEMPERATURE, |
||||
DEVICE_CLASS_POWER, |
||||
DEVICE_CLASS_ENERGY, |
||||
DEVICE_CLASS_VOLTAGE, |
||||
DEVICE_CLASS_ENERGY, |
||||
ENERGY_KILO_WATT_HOUR, |
||||
POWER_WATT, |
||||
VOLT, |
||||
TEMP_CELSIUS, |
||||
PERCENTAGE, |
||||
FREQUENCY_HERTZ |
||||
) |
||||
|
||||
_LOGGER = logging.getLogger(__name__) |
||||
|
||||
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
    """Create ECU-level and per-inverter sensors from coordinator data."""
    ecu = hass.data[DOMAIN].get("ecu")
    coordinator = hass.data[DOMAIN].get("coordinator")

    # ECU-wide aggregate sensors.
    sensors = [
        APSystemsECUSensor(coordinator, ecu, "current_power",
            label="Current Power", unit=POWER_WATT,
            devclass=DEVICE_CLASS_POWER, icon=SOLAR_ICON),
        APSystemsECUSensor(coordinator, ecu, "today_energy",
            label="Today Energy", unit=ENERGY_KILO_WATT_HOUR,
            devclass=DEVICE_CLASS_ENERGY, icon=SOLAR_ICON),
        APSystemsECUSensor(coordinator, ecu, "lifetime_energy",
            label="Lifetime Energy", unit=ENERGY_KILO_WATT_HOUR,
            devclass=DEVICE_CLASS_ENERGY, icon=SOLAR_ICON),
    ]

    # Per-inverter sensors, plus one power sensor per DC channel.
    inverters = coordinator.data.get("inverters", {})
    for uid, inv_data in inverters.items():
        sensors.extend([
            APSystemsECUInverterSensor(coordinator, ecu, uid,
                "temperature", label="Temperature",
                devclass=DEVICE_CLASS_TEMPERATURE, unit=TEMP_CELSIUS),
            APSystemsECUInverterSensor(coordinator, ecu, uid,
                "frequency", label="Frequency", unit=FREQUENCY_HERTZ,
                devclass=None, icon=FREQ_ICON),
            APSystemsECUInverterSensor(coordinator, ecu, uid,
                "voltage", label="Voltage", unit=VOLT,
                devclass=DEVICE_CLASS_VOLTAGE),
            APSystemsECUInverterSensor(coordinator, ecu, uid,
                "signal", label="Signal", unit=PERCENTAGE,
                icon=SIGNAL_ICON)
        ])
        for i in range(0, inv_data.get("channel_qty", 0)):
            sensors.append(
                # FIX: "power" was a pointless f-string (f"power").
                APSystemsECUInverterSensor(coordinator, ecu, uid, "power",
                    index=i, label=f"Power Ch {i+1}", unit=POWER_WATT,
                    devclass=DEVICE_CLASS_POWER, icon=SOLAR_ICON)
            )

    add_entities(sensors)
||||
|
||||
|
||||
class APSystemsECUInverterSensor(CoordinatorEntity, Entity):
    """Sensor for a single per-inverter (or per-channel) data field."""

    def __init__(self, coordinator, ecu, uid, field, index=0, label=None, icon=None, unit=None, devclass=None):
        """Track one field of inverter ``uid``; ``index`` selects the
        channel for list-valued fields such as "power"."""
        super().__init__(coordinator)

        self.coordinator = coordinator
        self._index = index
        self._uid = uid
        self._ecu = ecu
        self._field = field
        self._devclass = devclass
        self._label = label
        if not label:
            self._label = field
        self._icon = icon
        self._unit = unit

        self._name = f"Inverter {self._uid} {self._label}"
        self._state = None

    def _inverter_data(self):
        """Return this inverter's dict from coordinator data (may be empty)."""
        return self.coordinator.data.get("inverters", {}).get(self._uid, {})

    @property
    def unique_id(self):
        field = self._field
        # FIX: original compared with "!= None"; identity check is correct.
        if self._index is not None:
            field = f"{field}_{self._index}"
        return f"{self._ecu.ecu.ecu_id}_{self._uid}_{field}"

    @property
    def device_class(self):
        return self._devclass

    @property
    def name(self):
        return self._name

    @property
    def state(self):
        """Current value; list-valued fields are indexed by channel."""
        data = self._inverter_data()
        if self._field == "voltage":
            values = data.get("voltage", [])
            # FIX: guard empty/short lists instead of raising IndexError
            # when the ECU omits the field.
            return values[0] if values else None
        if self._field == "power":
            values = data.get("power", [])
            return values[self._index] if len(values) > self._index else None
        return data.get(self._field)

    @property
    def icon(self):
        return self._icon

    @property
    def unit_of_measurement(self):
        return self._unit

    @property
    def device_state_attributes(self):
        """ECU identity attributes shared by all inverter sensors."""
        return {
            "ecu_id": self._ecu.ecu.ecu_id,
            "last_update": self._ecu.ecu.last_update,
        }
||||
|
||||
class APSystemsECUSensor(CoordinatorEntity, Entity):
    """Sensor exposing one ECU-level field from the coordinator data."""

    def __init__(self, coordinator, ecu, field, label=None, icon=None, unit=None, devclass=None):
        """Track one top-level field of the ECU data dict."""
        super().__init__(coordinator)

        self.coordinator = coordinator
        self._ecu = ecu
        self._field = field
        self._label = label if label else field
        self._icon = icon
        self._unit = unit
        self._devclass = devclass
        self._name = f"ECU {self._label}"
        self._state = None

    @property
    def unique_id(self):
        """Stable id: <ecu serial>_<field>."""
        return f"{self._ecu.ecu.ecu_id}_{self._field}"

    @property
    def name(self):
        return self._name

    @property
    def device_class(self):
        return self._devclass

    @property
    def state(self):
        """Latest value of the field from the coordinator snapshot."""
        return self.coordinator.data.get(self._field)

    @property
    def icon(self):
        return self._icon

    @property
    def unit_of_measurement(self):
        return self._unit

    @property
    def device_state_attributes(self):
        """ECU identity and firmware attributes."""
        return {
            "ecu_id": self._ecu.ecu.ecu_id,
            "firmware": self._ecu.ecu.firmware,
            "last_update": self._ecu.ecu.last_update,
        }
|
@ -1,5 +0,0 @@ |
||||
start_query: |
||||
description: Start querying the ECU for new data (e.g. at sunrise)
||||
|
||||
stop_query: |
||||
description: Stop querying the ECU for new data (e.g. at sunset)
@ -1,25 +0,0 @@ |
||||
#!/usr/bin/env python3
"""Poll an APSystems ECU-R once a minute and pretty-print the data."""

from APSystemsECUR import APSystemsECUR
import time
import asyncio
from pprint import pprint

# ECU address and delay between polls (seconds).
ecu_ip = "192.168.0.251"
sleep = 60


def main():
    """Query the ECU forever, reporting each result or error."""
    ecu = APSystemsECUR(ecu_ip)

    while True:
        try:
            # asyncio.run replaces the deprecated
            # get_event_loop()/run_until_complete pattern.
            data = asyncio.run(ecu.async_query_ecu())
            print(f"[OK] Timestamp: {data.get('timestamp')} Current Power: {data.get('current_power')}")
            pprint(data)
        except Exception as err:
            # Best-effort monitoring loop: report and keep polling.
            print(f"[ERROR] {err}")

        print(f"Sleeping for {sleep} sec")
        time.sleep(sleep)


if __name__ == "__main__":
    main()
@ -1,30 +1,165 @@ |
||||
import os
import logging
# Verbose logging when the DEBUG env var is set, otherwise INFO;
# aiohttp is noisy, so keep it at WARNING unless debugging.
logging.basicConfig(
    format='[%(asctime)s] %(levelname)s %(module)s/%(funcName)s: - %(message)s',
    level=logging.DEBUG if os.environ.get('DEBUG') else logging.INFO)
logging.getLogger('aiohttp').setLevel(logging.DEBUG if os.environ.get('DEBUG') else logging.WARNING)

import asyncio
from aiohttp import web
from datetime import datetime, timedelta
import pytz

from APSystemsECUR import APSystemsECUR

# ECU address and the local interface the time-sync proxies bind to.
ECU_IP = '192.168.69.103'
LISTEN_IP = '192.168.69.100'
ecu = APSystemsECUR(ECU_IP)
app = web.Application()
# Timestamp of the last ECU sample processed; used to skip duplicates.
prev_ecu_timestamp = None
# Most recent full data set, served by the /data endpoint.
solar_data = {}

from influxdb import InfluxDBClient
# Local InfluxDB instance holding the 'solar2' time-series database.
client = InfluxDBClient('localhost', 8086, database='solar2')
|
||||
async def proxy(reader, writer):
    """Answer an ECU upstream request with a timestamp reply.

    NOTE(review): this listener appears to impersonate the vendor
    service the ECU phones home to, replying '101' + a timestamp five
    minutes in the past — confirm the protocol before changing the
    offset or reply format.
    """
    message = await reader.read(1024)
    addr = writer.get_extra_info('peername')

    try:
        print('Recvd from {}: {}'.format(addr[0], message))
        logging.debug('Recvd from {}: {}'.format(addr[0], message))
        # offset indexes into str(message), i.e. the repr of the bytes
        # object (including the leading "b'"), not the raw payload —
        # fragile, but the 14-char slice is the YYYYMMDDHHMMSS field.
        offset = 32
        date_time_obj = datetime.strptime(str(message)[offset:offset+14], '%Y%m%d%H%M%S') + timedelta(minutes=-5)
        send_str = '101' + datetime.strftime(date_time_obj, '%Y%m%d%H%M%S')
        send_data = send_str.encode()
        print('Sending to {}: {}'.format(addr[0], send_data))
        logging.debug('Sending to {}: {}'.format(addr[0], send_data))
        writer.write(send_data)
        await writer.drain()
    except ValueError:
        # Messages without a parseable timestamp are silently dropped.
        print('Ignored unnecessary data')
        logging.debug('Ignored unnecessary data')

    writer.close()
||||
|
||||
async def main():
    """Define the proxy-listener starter.

    NOTE(review): work-in-progress. The first start_server call (on the
    hard-coded 192.168.69.69) is immediately overwritten by the
    LISTEN_IP one, only the task from the last port is awaited, and
    run_proxies is defined but never invoked here — the module's
    __main__ block references a module-level run_proxies that this
    nested definition does not provide. Confirm the intended startup
    path before relying on this.
    """
    async def run_proxies():
        # Ports the ECU tries to reach its upstream service on.
        for port in [8995, 8996, 8997]:
            server = await asyncio.start_server(proxy, '192.168.69.69', port)
            server = await asyncio.start_server(proxy, LISTEN_IP, port)
            logging.info('Started TCP listener server on %s:%s', LISTEN_IP, port)
            task = asyncio.create_task(server.serve_forever())

        # block here for now
        await task
||||
async def get_data():
    """Poll the ECU every 120 s and record new samples to InfluxDB.

    Updates the module-level ``solar_data`` (served by /data) whenever
    the ECU reports a timestamp different from the previous poll, and
    writes per-inverter, per-panel (two channels) and ECU-level points.
    """
    global prev_ecu_timestamp
    global solar_data

    await asyncio.sleep(1)

    while True:
        try:
            logging.debug('Grabbing ECU data...')
            # NOTE(review): query_ecu() looks like the synchronous API —
            # it would block the event loop for the duration of the ECU
            # exchange; confirm against APSystemsECUR.
            data = ecu.query_ecu()
            logging.debug('Good read, timestamp: %s Mountain Time, current power: %s', data['timestamp'], data['current_power'])

            # Only process when the ECU produced a new sample.
            if data['timestamp'] != prev_ecu_timestamp:
                total = 0
                timestamp = datetime.utcnow()

                # Independent total: sum both DC channels of every
                # inverter (compared against the ECU's own figure below).
                for i in data['inverters'].values():
                    total += i['power'][0]
                    total += i['power'][1]
                data['actual_total'] = total

                solar_data = data
                logging.info('Solar data updated, ecu time: %s Mountain, ecu total: %s, actual total: %s', data['timestamp'], data['current_power'], total)

                # One 'inverter' point plus one 'panel' point per
                # channel, for each inverter.
                points = []
                for i in data['inverters'].values():
                    points.append({
                        'time': timestamp,
                        'measurement': 'inverter',
                        'tags': {'ecu': data['ecu_id'], 'inverter': i['uid']},
                        'fields': {
                            'ecu_time': data['timestamp'],
                            'online': i['online'],
                            'frequency': i['frequency'],
                            'temperature': i['temperature'],
                        }
                    })

                    points.append({
                        'time': timestamp,
                        'measurement': 'panel',
                        'tags': {'ecu': data['ecu_id'], 'inverter': i['uid'], 'channel': '0'},
                        'fields': {
                            'ecu_time': data['timestamp'],
                            'power': i['power'][0],
                            'voltage': i['voltage'][0]
                        }
                    })

                    points.append({
                        'time': timestamp,
                        'measurement': 'panel',
                        'tags': {'ecu': data['ecu_id'], 'inverter': i['uid'], 'channel': '1'},
                        'fields': {
                            'ecu_time': data['timestamp'],
                            'power': i['power'][1],
                            'voltage': i['voltage'][1]
                        }
                    })

                # Single ECU-level summary point per sample.
                points.append({
                    'time': timestamp,
                    'measurement': 'ecu',
                    'tags': {'ecu': data['ecu_id']},
                    'fields': {
                        'ecu_total': data['current_power'],
                        'ecu_time': data['timestamp'],
                        'actual_total': data['actual_total'],
                        'today_energy': data['today_energy'],
                        'lifetime_energy': data['lifetime_energy'],
                    }
                })

                client.write_points(points)

                logging.info('Wrote %s points to InfluxDB', len(points))

                prev_ecu_timestamp = data['timestamp']

        except Exception as err:
            # Best-effort loop: log and retry on the next cycle.
            logging.error('Error: ' + str(err))

        await asyncio.sleep(120)
||||
|
||||
async def index(request):
    """Serve a trivial health-check page at the site root."""
    body = 'hello world'
    return web.Response(text=body, content_type='text/html')
||||
|
||||
async def data(request):
    """Serve the most recent ECU data set as JSON."""
    latest = solar_data
    return web.json_response(latest)
||||
|
||||
async def history(request):
    """Return all ECU summary points for one local day as JSON.

    The {date} path segment must be YYYY-MM-DD; anything unparseable
    yields a 404.
    """
    try:
        date = datetime.strptime(request.match_info['date'], '%Y-%m-%d')
        # Interpret the requested day in the site's local timezone.
        tz = pytz.timezone('America/Edmonton')
        date = tz.localize(date)
    except ValueError:
        raise web.HTTPNotFound

    # Half-open [start, end) epoch-second range covering the local day.
    start = int(date.timestamp())
    end = date + timedelta(days=1)
    end = int(end.timestamp())

    q = 'select * from ecu where time >= {}s and time < {}s'.format(start, end)
    history = list(client.query(q).get_points())

    return web.json_response(history)
||||
|
||||
if __name__ == '__main__':
    app.router.add_get('/', index)
    app.router.add_get('/data', data)
    app.router.add_get('/history/{date}', history)

    # NOTE(review): work-in-progress startup path. asyncio.run(main())
    # only defines main's nested helper and returns; the create_task
    # call below references run_proxies at module scope, which is not
    # defined there; and get_event_loop() after asyncio.run() relies on
    # deprecated implicit-loop behavior. Confirm the intended sequence
    # before changing.
    asyncio.run(main())
    loop = asyncio.get_event_loop()
    loop.create_task(run_proxies())
    loop.create_task(get_data())
    web.run_app(app, port=6901)
||||
|
@ -0,0 +1,18 @@ |
||||
aiohttp==3.7.4.post0 |
||||
async-timeout==3.0.1 |
||||
attrs==21.2.0 |
||||
certifi==2021.5.30 |
||||
chardet==4.0.0 |
||||
ciso8601==2.1.3 |
||||
idna==2.10 |
||||
influxdb==5.3.1 |
||||
msgpack==1.0.2 |
||||
multidict==5.1.0 |
||||
pkg-resources==0.0.0 |
||||
python-dateutil==2.8.1 |
||||
pytz==2021.1 |
||||
requests==2.25.1 |
||||
six==1.16.0 |
||||
typing-extensions==3.10.0.0 |
||||
urllib3==1.26.5 |
||||
yarl==1.6.3 |
Loading…
Reference in new issue