import platform
import schedule
import distro

from time import sleep
from queue import Queue
from sys import version
from threading import Thread
from os import environ as env
from os import access, R_OK, getenv
from os.path import isdir, abspath, dirname, join
from argparse import ArgumentParser, RawTextHelpFormatter
from logging import getLogger, StreamHandler, Formatter, DEBUG

# Needed to check version of python
from varken import structures  # noqa
from varken.ombi import OmbiAPI
from varken.overseerr import OverseerrAPI
from varken.unifi import UniFiAPI
from varken import VERSION, BRANCH, BUILD_DATE
from varken.sonarr import SonarrAPI
from varken.radarr import RadarrAPI
from varken.lidarr import LidarrAPI
from varken.iniparser import INIParser
from varken.dbmanager import DBManager
from varken.influxdb2manager import InfluxDB2Manager
from varken.helpers import GeoIPHandler
from varken.tautulli import TautulliAPI
from varken.sickchill import SickChillAPI
from varken.varkenlogger import VarkenLogger
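
# Human-readable distro description for the startup banner, e.g.
# "ubuntu 22.04 Ubuntu". The distro helpers typically return empty
# strings on non-Linux hosts, so the joined string stays empty there
# and the banner below omits it.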
PLATFORM_LINUX_DISTRO = ' '.join(filter(None, [distro.id(), distro.version(), distro.name()]))


def thread(job, **kwargs):
    # Hand the job off to a fresh thread so a slow or hung API call cannot
    # block the scheduler loop or delay the other collectors
    worker = Thread(target=job, kwargs=kwargs)
    worker.start()

if __name__ == "__main__":
    parser = ArgumentParser(prog='varken',
                            description='Command-line utility to aggregate data from the Plex ecosystem into InfluxDB',
                            formatter_class=RawTextHelpFormatter)

    parser.add_argument("-d", "--data-folder", help='Define an alternate data folder location')
    parser.add_argument("-D", "--debug", action='store_true', help='Use to enable DEBUG logging. (Deprecated)')
    parser.add_argument("-ND", "--no_debug", action='store_true', help='Use to disable DEBUG logging')

    opts = parser.parse_args()

    # Temporary console logger, used only until the data folder is validated
    # and the real VarkenLogger can be created
    templogger = getLogger('temp')
    templogger.setLevel(DEBUG)
    tempch = StreamHandler()
    tempformatter = Formatter('%(asctime)s : %(levelname)s : %(module)s : %(message)s', '%Y-%m-%d %H:%M:%S')
    tempch.setFormatter(tempformatter)
    templogger.addHandler(tempch)
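
    # Resolution order for the data folder: the DATA_FOLDER environment
    # variable wins, then the -d/--data-folder flag, then a ./data folder
    # next to this script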
    DATA_FOLDER = env.get('DATA_FOLDER', vars(opts).get('data_folder') or abspath(join(dirname(__file__), 'data')))

    if isdir(DATA_FOLDER):
        if not access(DATA_FOLDER, R_OK):
            templogger.error("Read permission error for %s", DATA_FOLDER)
            exit(1)
    else:
        templogger.error("%s does not exist", DATA_FOLDER)
        exit(1)

    # Debug logging defaults to on; a DEBUG environment variable overrides it,
    # and -ND/--no_debug turns it off when no env var is set
    enable_opts = ['True', 'true', 'yes']
    debug_opts = ['debug', 'Debug', 'DEBUG']

    opts.debug = True

    if getenv('DEBUG') is not None:
        opts.debug = any(getenv(string, '') in enable_opts for string in debug_opts)
    elif opts.no_debug:
        opts.debug = False
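
    # For example (assuming these are set in the container environment):
    #   DEBUG=true -> debug logging stays on
    #   DEBUG=no   -> debug logging off, no -ND flag needed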

    # Initiate the logger
    vl = VarkenLogger(data_folder=DATA_FOLDER, debug=opts.debug)
    vl.logger.info('Starting Varken...')

    vl.logger.info('Data folder is "%s"', DATA_FOLDER)

    vl.logger.info(u"%s %s (%s%s)", platform.system(), platform.release(), platform.version(),
                   ' - ' + PLATFORM_LINUX_DISTRO if PLATFORM_LINUX_DISTRO else '')

    vl.logger.info(u"Python %s", version)

    vl.logger.info("Varken v%s-%s %s", VERSION, BRANCH, BUILD_DATE)

    CONFIG = INIParser(DATA_FOLDER)

    if CONFIG.influx2_enabled:
        # Use INFLUX version 2
        vl.logger.info('Using INFLUXDBv2')
        DBMANAGER = InfluxDB2Manager(CONFIG.influx_server)
    else:
        vl.logger.info('Using INFLUXDB')
        DBMANAGER = DBManager(CONFIG.influx_server)

    QUEUE = Queue()
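
    # Every collector below follows the same pattern: wrap the API call in
    # thread() so it runs off the scheduler loop, schedule it at the interval
    # configured in varken.ini, and tag it "<service>-<server id>-<job>" so
    # the job can be identified later (schedule.clear(tag) would cancel it).
    # Both manager classes expose the same write_points() entry point, so the
    # collectors never need to know which InfluxDB version sits behind
    # DBMANAGER.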

    if CONFIG.sonarr_enabled:
        for server in CONFIG.sonarr_servers:
            SONARR = SonarrAPI(server, DBMANAGER)
            if server.queue:
                at_time = schedule.every(server.queue_run_seconds).seconds
                at_time.do(thread, SONARR.get_queue).tag("sonarr-{}-get_queue".format(server.id))
            if server.missing_days > 0:
                at_time = schedule.every(server.missing_days_run_seconds).seconds
                at_time.do(thread, SONARR.get_calendar, query="Missing").tag("sonarr-{}-get_missing".format(server.id))
            if server.future_days > 0:
                at_time = schedule.every(server.future_days_run_seconds).seconds
                at_time.do(thread, SONARR.get_calendar, query="Future").tag("sonarr-{}-get_future".format(server.id))
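
    # A single GeoIP handler (seeded with the first Tautulli server's MaxMind
    # license key) is shared by all Tautulli servers, and
    # schedule.every(12).to(24).hours refreshes the database at a randomized
    # 12-24 hour interval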

    if CONFIG.tautulli_enabled:
        GEOIPHANDLER = GeoIPHandler(DATA_FOLDER, CONFIG.tautulli_servers[0].maxmind_license_key)
        schedule.every(12).to(24).hours.do(thread, GEOIPHANDLER.update)
        for server in CONFIG.tautulli_servers:
            TAUTULLI = TautulliAPI(server, DBMANAGER, GEOIPHANDLER)
            if server.get_activity:
                at_time = schedule.every(server.get_activity_run_seconds).seconds
                at_time.do(thread, TAUTULLI.get_activity).tag("tautulli-{}-get_activity".format(server.id))
            if server.get_stats:
                at_time = schedule.every(server.get_stats_run_seconds).seconds
                at_time.do(thread, TAUTULLI.get_stats).tag("tautulli-{}-get_stats".format(server.id))

    if CONFIG.radarr_enabled:
        for server in CONFIG.radarr_servers:
            RADARR = RadarrAPI(server, DBMANAGER)
            if server.get_missing:
                at_time = schedule.every(server.get_missing_run_seconds).seconds
                at_time.do(thread, RADARR.get_missing).tag("radarr-{}-get_missing".format(server.id))
            if server.queue:
                at_time = schedule.every(server.queue_run_seconds).seconds
                at_time.do(thread, RADARR.get_queue).tag("radarr-{}-get_queue".format(server.id))

    if CONFIG.lidarr_enabled:
        for server in CONFIG.lidarr_servers:
            LIDARR = LidarrAPI(server, DBMANAGER)
            if server.queue:
                at_time = schedule.every(server.queue_run_seconds).seconds
                at_time.do(thread, LIDARR.get_queue).tag("lidarr-{}-get_queue".format(server.id))
            if server.missing_days > 0:
                at_time = schedule.every(server.missing_days_run_seconds).seconds
                at_time.do(thread, LIDARR.get_calendar, query="Missing").tag(
                    "lidarr-{}-get_missing".format(server.id))
            if server.future_days > 0:
                at_time = schedule.every(server.future_days_run_seconds).seconds
                at_time.do(thread, LIDARR.get_calendar, query="Future").tag("lidarr-{}-get_future".format(
                    server.id))

    if CONFIG.ombi_enabled:
        for server in CONFIG.ombi_servers:
            OMBI = OmbiAPI(server, DBMANAGER)
            if server.request_type_counts:
                at_time = schedule.every(server.request_type_run_seconds).seconds
                at_time.do(thread, OMBI.get_request_counts).tag("ombi-{}-get_request_counts".format(server.id))
            if server.request_total_counts:
                at_time = schedule.every(server.request_total_run_seconds).seconds
                at_time.do(thread, OMBI.get_all_requests).tag("ombi-{}-get_all_requests".format(server.id))
            if server.issue_status_counts:
                at_time = schedule.every(server.issue_status_run_seconds).seconds
                at_time.do(thread, OMBI.get_issue_counts).tag("ombi-{}-get_issue_counts".format(server.id))
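
    # Overseerr gets two jobs: aggregated request counts, and detail on the
    # most recent requests whenever num_latest_requests_to_fetch is above 0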

    if CONFIG.overseerr_enabled:
        for server in CONFIG.overseerr_servers:
            OVERSEER = OverseerrAPI(server, DBMANAGER)
            if server.get_request_total_counts:
                at_time = schedule.every(server.request_total_run_seconds).seconds
                at_time.do(thread, OVERSEER.get_request_counts).tag("overseerr-{}-get_request_counts"
                                                                    .format(server.id))
            if server.num_latest_requests_to_fetch > 0:
                at_time = schedule.every(server.num_latest_requests_seconds).seconds
                at_time.do(thread, OVERSEER.get_latest_requests).tag("overseerr-{}-get_latest_requests"
                                                                     .format(server.id))

    if CONFIG.sickchill_enabled:
        for server in CONFIG.sickchill_servers:
            SICKCHILL = SickChillAPI(server, DBMANAGER)
            if server.get_missing:
                at_time = schedule.every(server.get_missing_run_seconds).seconds
                at_time.do(thread, SICKCHILL.get_missing).tag("sickchill-{}-get_missing".format(server.id))

    if CONFIG.unifi_enabled:
        for server in CONFIG.unifi_servers:
            UNIFI = UniFiAPI(server, DBMANAGER)
            at_time = schedule.every(server.get_usg_stats_run_seconds).seconds
            at_time.do(thread, UNIFI.get_usg_stats).tag("unifi-{}-get_usg_stats".format(server.id))
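
    # Bail out early when every service is disabled; otherwise fire every job
    # once immediately so the first data points land without waiting a full
    # interval, then tick pending jobs once a second while any remain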
    SERVICES_ENABLED = [CONFIG.ombi_enabled, CONFIG.radarr_enabled, CONFIG.tautulli_enabled, CONFIG.unifi_enabled,
                        CONFIG.sonarr_enabled, CONFIG.sickchill_enabled, CONFIG.lidarr_enabled,
                        CONFIG.overseerr_enabled]
    if not any(SERVICES_ENABLED):
        vl.logger.error("All services disabled. Exiting")
        exit(1)

    # Run all on startup
    schedule.run_all()

    while schedule.jobs:
        schedule.run_pending()
        sleep(1)