annotate myrss/myrss_app.py @ 121:26cc1a16e7a3

laterlinks3: fix gcs_upload() function
author paulo
date Sat, 16 Jan 2021 10:14:02 +0000
parents e2817e789895
children
rev   line source
paulo@39 1 import os
paulo@40 2 import sys
paulo@39 3 import re
paulo@40 4 import urllib2
paulo@40 5 import threading
paulo@40 6 import Queue
paulo@41 7 import datetime
paulo@41 8 import time
paulo@70 9 import traceback
paulo@98 10 import StringIO
paulo@47 11
paulo@42 12 import logging
paulo@94 13 #logging.basicConfig(
paulo@94 14 # level=logging.DEBUG,
paulo@94 15 # filename="_LOG",
paulo@94 16 # format="%(asctime)s %(levelname)-8s %(message)s",
paulo@94 17 #)
paulo@39 18
paulo@47 19 import xml.etree.ElementTree
paulo@47 20 import HTMLParser
paulo@47 21
paulo@39 22 import html
paulo@39 23
paulo@39 24
FEEDS_FILE = "FEEDS"                  # input: one feed URL per line; lines starting with '#' are skipped
CACHE_HTML_FILE = "__cache__.html"    # rendered-page cache written/served by main()

CACHE_LIFE = 1200 # [seconds] serve the cached page while younger than this
MAX_ITEMS = 50    # cap on items rendered per feed
MAX_LINK_Z = 4    # number of alternating CSS "zN" classes cycled across links
MAX_THREADS = 20  # size of the feed-download worker pool (and of both queues)
URLOPEN_TIMEOUT = 10 # [seconds] per-URL download timeout
paulo@39 33
paulo@39 34
paulo@39 35 _PARSE_ROOT_TAG_RE = re.compile(r"(\{(.+)\})?(.+)")
paulo@39 36
paulo@39 37 def _parse_root_tag(root_tag):
paulo@39 38 re_match = _PARSE_ROOT_TAG_RE.match(root_tag)
paulo@39 39
paulo@39 40 if re_match is None:
paulo@39 41 return (None, None)
paulo@39 42 else:
paulo@39 43 return re_match.group(2, 3)
paulo@39 44
paulo@39 45
paulo@47 46 def _strip_if_not_none(txt):
paulo@47 47 return txt.strip() if txt is not None else ''
paulo@47 48
paulo@47 49
def _go_rss(elementTree):
    """Extract (title, link, items) from an RSS 2.0 document tree.

    items is a list of (item_title, item_link) tuples, capped at MAX_ITEMS.
    """
    channel_title = _strip_if_not_none(elementTree.find("channel/title").text)
    channel_link = elementTree.find("channel/link").text

    items = [
        (_strip_if_not_none(entry.find("title").text), entry.find("link").text)
        for entry in elementTree.findall("channel/item")[:MAX_ITEMS]
    ]

    return (channel_title, channel_link, items)
paulo@39 63
paulo@39 64
def _go_atom(elementTree):
    """Extract (title, link, items) from an Atom feed tree.

    items is a list of (entry_title, entry_link) tuples, capped at MAX_ITEMS.
    """
    ns = "http://www.w3.org/2005/Atom"

    def _pick_href(link_elements):
        # A lone <link> wins outright; with several, take the first
        # rel="alternate" one.  '' when nothing qualifies.
        for el in link_elements:
            if len(link_elements) == 1 or el.get("rel") == "alternate":
                return el.get("href")
        return ''

    title = _strip_if_not_none(elementTree.find("{%s}title" % ns).text)
    link = _pick_href(elementTree.findall("{%s}link" % ns))

    items = []
    for entry in elementTree.findall("{%s}entry" % ns)[:MAX_ITEMS]:
        entry_title = _strip_if_not_none(entry.find("{%s}title" % ns).text)
        entry_link = _pick_href(entry.findall("{%s}link" % ns))
        items.append((entry_title, entry_link))

    return (title, link, items)
paulo@39 92
paulo@39 93
def _go_purl_rss(elementTree):
    """Extract (title, link, items) from an RDF / RSS 1.0 (purl.org) feed tree.

    items is a list of (item_title, item_link) tuples, capped at MAX_ITEMS.
    """
    ns = "http://purl.org/rss/1.0/"

    def q(tag):
        # Qualify a tag name with the RSS 1.0 namespace.
        return "{%s}%s" % (ns, tag)

    title = _strip_if_not_none(elementTree.find(q("channel") + "/" + q("title")).text)
    link = elementTree.find(q("channel") + "/" + q("link")).text

    items = []
    for item in elementTree.findall(q("item"))[:MAX_ITEMS]:
        items.append((_strip_if_not_none(item.find(q("title")).text),
                      item.find(q("link")).text))

    return (title, link, items)
paulo@69 109
paulo@69 110
# Non-greedy match for a single HTML/XML tag.
_STRIP_HTML_RE = re.compile(r"<.*?>")
_htmlParser = HTMLParser.HTMLParser()


def _strip_html(txt):
    """Drop markup tags from *txt*, then decode HTML entities."""
    tagless = _STRIP_HTML_RE.sub('', txt)
    return _htmlParser.unescape(tagless)
paulo@47 116
paulo@47 117
def _to_html(dtnow, docstruct):
    """Render the aggregated feeds into a complete HTML page.

    dtnow -- datetime used for the page title and the render-time footer.
    docstruct -- list of (title, link, items) tuples, with None entries for
    feeds that failed to download or parse.
    Returns the page as a UTF-8 encoded byte string.
    """
    datetime_str = dtnow.strftime("%Y-%m-%d %H:%M %Z")
    page_title = "myrss -- %s" % datetime_str

    root = html.HTML("html")

    header = root.head
    header.meta(name="viewport", content="width=device-width, initial-scale=1")
    header.title(page_title)
    header.link(rel="stylesheet", type="text/css", href="index.css")

    body = root.body
    body.h1(page_title)

    # link_z cycles 0..MAX_LINK_Z-1 to alternate the CSS class of links.
    link_z = 0

    for feed in docstruct:
        # Failed feeds are None: skip silently.
        if feed is None:
            continue

        (title, link, items) = feed

        logging.debug("title: %s", title)
        body.h2.a(_strip_html(title), href=link, klass="z%d" % (link_z % MAX_LINK_Z))
        link_z += 1
        p = body.p

        for (i, (it_title, it_link)) in enumerate(items):
            # Separate consecutive items with a dash.
            if i > 0:
                p += " - "

            if not it_title:
                it_title = "(missing title)"
            if it_link is not None:
                p.a(_strip_html(it_title), href=it_link, klass="z%d" % (link_z % MAX_LINK_Z))
            else:
                # No link available: emit plain text instead of an anchor.
                p += _strip_html(it_title)

            link_z += 1

    # Footer: seconds spent rendering this page, styled by the "debug" class.
    dtdelta = datetime.datetime.now() - dtnow
    root.div("%.3f" % (dtdelta.days*86400 + dtdelta.seconds + dtdelta.microseconds/1e6), klass="debug")

    return unicode(root).encode("utf-8")
paulo@39 162
paulo@39 163
def _fetch_url(url):
    """Download *url* and return the raw response body, or None on HTTP error.

    Uses a browser-like User-Agent (some feed hosts reject the default one)
    and URLOPEN_TIMEOUT.  Non-HTTP errors (DNS failure, timeout, ...)
    propagate to the caller, which logs them.
    """
    try:
        logging.info("processing %s" % url)
        feed = urllib2.urlopen(urllib2.Request(url, headers={"User-Agent": "Mozilla/5.0 Browser"}), timeout=URLOPEN_TIMEOUT)
    except urllib2.HTTPError as e:
        logging.info("(%s) %s" % (url, e))
        return None

    # BUG FIX: the response was never closed, leaking a socket per fetch;
    # close it even if read() raises.
    try:
        return feed.read()
    finally:
        feed.close()
paulo@98 173
paulo@98 174
paulo@98 175 def _filter_feed(feed):
paulo@98 176 ret = feed
paulo@98 177
paulo@98 178 filter_out = ["\x16"]
paulo@98 179 for i in filter_out:
paulo@98 180 ret = ret.replace(i, "")
paulo@98 181
paulo@98 182 return ret
paulo@47 183
paulo@47 184
def _process_feed(feed):
    """Parse raw feed text and dispatch to the handler for its format.

    Returns a (title, link, items) tuple; raises NotImplementedError for
    an unsupported RSS version or an unrecognized document root.
    """
    feed_sio = StringIO.StringIO(feed)
    elementTree = xml.etree.ElementTree.parse(feed_sio)
    parsed_root_tag = _parse_root_tag(elementTree.getroot().tag)

    if parsed_root_tag == (None, "rss"):
        # Only RSS >= 2.0 is supported; older dialects raise.
        version = float(elementTree.getroot().get("version", 0.0))
        if version >= 2.0:
            return _go_rss(elementTree)
        raise NotImplementedError("Unsupported rss version")

    if parsed_root_tag == ("http://www.w3.org/2005/Atom", "feed"):
        return _go_atom(elementTree)

    if parsed_root_tag == ("http://www.w3.org/1999/02/22-rdf-syntax-ns#", "RDF"):
        return _go_purl_rss(elementTree)

    raise NotImplementedError("Unknown root tag")
paulo@40 208
paulo@40 209
class WorkerThread(threading.Thread):
    """Daemon thread turning (index, url) work items into (index, feed) results."""

    def __init__(self, *args, **kwargs):
        # The two queues are our own keyword arguments; pop them before
        # handing the rest to threading.Thread.
        self._input_queue = kwargs.pop("input_queue")
        self._output_queue = kwargs.pop("output_queue")
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True

    def run(self):
        """Consume URLs forever; always report back, even when a feed fails."""
        while True:
            (idx, url) = self._input_queue.get()
            docfeed = None
            try:
                raw = _fetch_url(url)
                if raw is not None:
                    docfeed = _process_feed(_filter_feed(raw))
            except Exception as e:
                logging.info("(%s) exception: (%s) %s" % (url, type(e), e))
            # docfeed stays None on failure so main() still gets an answer.
            self._output_queue.put((idx, docfeed))
paulo@40 228
paulo@40 229
def main(input_queue, output_queue, lock):
    """Produce the HTML page, serving from the on-disk cache when fresh.

    Only one request builds the page at a time (guarded by *lock*); feed
    downloads are fanned out to the worker threads through the queues.
    Returns the page as a string.
    """
    page = ''

    with lock:
        logging.debug("main() started")
        epoch_now = time.time()
        dtnow = datetime.datetime.fromtimestamp(epoch_now)

        cache_is_fresh = (
            os.path.exists(CACHE_HTML_FILE)
            and (epoch_now - os.stat(CACHE_HTML_FILE).st_mtime) < float(CACHE_LIFE)
        )

        if cache_is_fresh:
            with open(CACHE_HTML_FILE) as cache_html_file:
                page = cache_html_file.read()
        else:
            with open(FEEDS_FILE) as feeds_file:
                feedlines = feeds_file.readlines()

            # Pre-size so out-of-order results can be slotted by index;
            # comment lines simply stay None.
            docstruct = [None] * len(feedlines)

            num_input = 0
            for (idx, line) in enumerate(feedlines):
                if line[0] != '#':
                    input_queue.put((idx, line.strip()))
                    num_input += 1

            # Collect exactly as many results as were queued.
            for _ in range(num_input):
                (idx, docfeed) = output_queue.get()
                docstruct[idx] = docfeed

            page = _to_html(dtnow, docstruct)

            with open(CACHE_HTML_FILE, 'w') as cache_html_file:
                cache_html_file.write(page)
        logging.debug("main() ended")

    return page
paulo@41 265
paulo@41 266
class MyRssApp:
    """WSGI application object: renders the aggregated feed page per request."""

    def __init__(self):
        """Create the bounded work queues and start the worker thread pool."""
        logging.debug("MyRssApp.__init__() called")
        self._iq = Queue.Queue(MAX_THREADS)
        self._oq = Queue.Queue(MAX_THREADS)
        self._main_lock = threading.Lock()

        for i in range(MAX_THREADS):
            logging.debug("Starting thread: %d" % i)
            WorkerThread(input_queue=self._iq, output_queue=self._oq).start()

    def __call__(self, environ, start_response):
        """WSGI entry point: 200 with the page, or 500 with a plain-text traceback."""
        response_code = "500 Internal Server Error"
        response_type = "text/plain; charset=UTF-8"

        try:
            response_body = main(self._iq, self._oq, self._main_lock)
            response_code = "200 OK"
            response_type = "text/html; charset=UTF-8"
        except Exception:
            # BUG FIX: was a bare "except:", which also traps SystemExit and
            # KeyboardInterrupt and can keep the server from shutting down.
            # Exception is enough to turn any rendering failure into a 500.
            response_body = traceback.format_exc()

        response_headers = [
            ("Content-Type", response_type),
            ("Content-Length", str(len(response_body))),
        ]
        start_response(response_code, response_headers)

        return [response_body]