1
0
Fork 0
mirror of https://gitlab.com/SIGBUS/nyaa.git synced 2024-12-22 15:10:01 +00:00

sync_es: die when killed

This commit is contained in:
queue 2017-05-28 19:46:38 -06:00
parent d89f74893b
commit 87db2e9bae

View file

@ -24,6 +24,10 @@ database into es, at the expense of redoing a (small) amount of indexing.
This uses multithreading so we don't have to block on socket io (both binlog This uses multithreading so we don't have to block on socket io (both binlog
reading and es POSTing). asyncio soon reading and es POSTing). asyncio soon
This script will exit on any sort of exception, so you'll want to use your
supervisor's restart functionality, e.g. Restart=failure in systemd, or
the poor man's `while true; do sync_es.py; sleep 1; done` in tmux.
""" """
from elasticsearch import Elasticsearch from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk, BulkIndexError from elasticsearch.helpers import bulk, BulkIndexError
@ -132,14 +136,31 @@ def delet_this(row, index_name):
'_type': 'torrent', '_type': 'torrent',
'_id': str(row['values']['id'])} '_id': str(row['values']['id'])}
# we could try to make this script robust to errors from es or mysql, but since
# the only thing we can do is "clear state and retry", it's easier to leave
this to the supervisor. If we were carrying around heavier state in-process,
# it'd be more worth it to handle errors ourselves.
#
# Apparently there's no setDefaultUncaughtExceptionHandler in threading, and
# sys.excepthook is also broken, so this gives us the same
# exit-if-anything-happens semantics.
class ExitingThread(Thread):
    """Thread base class that kills the whole process on any error.

    Subclasses implement ``run_happy()`` instead of ``run()``. If
    ``run_happy`` raises anything at all, the exception is logged and the
    entire process is terminated with exit status 1 via ``os._exit``,
    so the supervisor (systemd, a shell loop, ...) can restart us with a
    clean slate. ``sys.exit``/``SystemExit`` would only end this one
    thread, which is why ``os._exit`` is used here.
    """
    def run(self):
        try:
            self.run_happy()
        except BaseException:
            # Deliberately catch *everything* (including SystemExit and
            # KeyboardInterrupt): the recovery strategy for any failure is
            # "die and let the supervisor restart us". BaseException is the
            # explicit spelling of the original bare `except:`.
            log.exception("something happened")
            # sys.exit only exits the thread, lame; os._exit takes the
            # whole process down immediately.
            import os
            os._exit(1)
class BinlogReader(Thread): class BinlogReader(ExitingThread):
# write_buf is the Queue we communicate with # write_buf is the Queue we communicate with
def __init__(self, write_buf): def __init__(self, write_buf):
Thread.__init__(self) Thread.__init__(self)
self.write_buf = write_buf self.write_buf = write_buf
def run(self): def run_happy(self):
with open(SAVE_LOC) as f: with open(SAVE_LOC) as f:
pos = json.load(f) pos = json.load(f)
@ -228,7 +249,7 @@ class BinlogReader(Thread):
else: else:
raise Exception(f"unknown table {s.table}") raise Exception(f"unknown table {s.table}")
class EsPoster(Thread): class EsPoster(ExitingThread):
# read_buf is the queue of stuff to bulk post # read_buf is the queue of stuff to bulk post
def __init__(self, read_buf, chunk_size=1000, flush_interval=5): def __init__(self, read_buf, chunk_size=1000, flush_interval=5):
Thread.__init__(self) Thread.__init__(self)
@ -236,7 +257,7 @@ class EsPoster(Thread):
self.chunk_size = chunk_size self.chunk_size = chunk_size
self.flush_interval = flush_interval self.flush_interval = flush_interval
def run(self): def run_happy(self):
es = Elasticsearch(timeout=30) es = Elasticsearch(timeout=30)
last_save = time.time() last_save = time.time()