geminispace.info

git clone git://code.clttr.info/geminispace.info.git

commit 87ef15df2ebeb3bd17e3a69fb296f0ed657c4814
parent b5bf01a4454e1e904d09499038ef80ea8b5255e9
Author: René Wagner <rwa@clttr.info>
Date:   Sun, 11 Jul 2021 19:03:15 +0200

restructure crawl data

The "crawl" table is now obsolete and has been removed; all required
information is stored in the `page` table, which simplifies queries
and will make data cleanup easier.

All relevant queries have been adjusted to honor this change.
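
As an illustration of the simpler queries this enables, here is a minimal
sketch (not part of the patch itself) that mirrors the adjusted feeds query
in serve/models.py; the standalone helper name is illustrative. Crawl
metadata such as last_crawl_success_at is now read straight from the page
table, with no join against a separate crawl table:

```
# Sketch only: crawl state now lives on the page table itself, so lookups
# that previously joined the crawl table can filter page directly.
from gus.lib.db_model import Page

def known_feeds():
    # Select feed-like pages that have been crawled successfully at least once.
    return Page.raw(
        """SELECT DISTINCT p.*
FROM page AS p
WHERE (p.url LIKE '%atom.xml'
OR p.url LIKE '%feed.xml'
OR p.url LIKE '%.rss'
OR p.url LIKE '%.atom'
OR p.content_type IN ('application/atom+xml', 'application/rss+xml'))
AND p.last_crawl_success_at IS NOT NULL"""
    ).execute()
```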

Diffstat:
M gus/build_index.py | 12 +++++-------
M gus/crawl.py | 62 +++++++++++++++++++++++++++++---------------------------------
M gus/excludes.py | 6 +++---
M gus/lib/db_model.py | 1 -
M gus/lib/index_statistics.py | 13 ++++++-------
M gus/lib/search.py | 10 +---------
A infra/rebuild_index.sh | 8 ++++++++
D infra/update_index.sh | 6 ------
M serve/constants.py | 43 -------------------------------------------
M serve/models.py | 20 +++++++-------------
M serve/templates/about.gmi | 2 --
M serve/templates/documentation/indexing.gmi | 10 +++-------
M serve/templates/fragments/footer.gmi | 2 ++
M serve/templates/index.gmi | 3 ++-
M serve/templates/known_hosts.gmi | 3 ++-
M serve/templates/news.gmi | 4 ++++
M serve/views.py | 48 ++++++++++++++++--------------------------------
17 files changed, 88 insertions(+), 165 deletions(-)

diff --git a/gus/build_index.py b/gus/build_index.py
@@ -24,7 +24,7 @@ uses_relative.append("gemini")
uses_netloc.append("gemini")
-def index_page(index, page, should_run_desctructive):
+def index_page(index, page):
if should_skip(GeminiResource(page.url)):
logging.debug(
"URL is excluded, skipping: %s",
@@ -46,6 +46,7 @@ AND l.is_cross_host_like == 1""",
f"{u}/",
)
+ logging.debug("Calculating backlinks for %s", u)
backlink_urls = [b.url for b in external_backlinks.execute()]
backlink_count = len(backlink_urls)
@@ -65,11 +66,8 @@ AND l.is_cross_host_like == 1""",
"content": page.content,
}
try:
- if (page.indexed_at is None or should_run_destructive):
- index.add_document(document)
- else:
- index.update_document(document)
-
+ logging.debug("Adding document to index: %s", page.url);
+ index.add_document(document)
page.indexed_at=datetime.utcnow()
page.save()
@@ -120,7 +118,7 @@ OR (p.content_type LIKE 'text/%' AND p.size <= ?))""", constants.MAXIMUM_TEXT_PA
)
for page in pages.iterator():
- index_page(index, page, should_run_destructive)
+ index_page(index, page)
try:
index.close()
diff --git a/gus/crawl.py b/gus/crawl.py
@@ -41,13 +41,12 @@ EXCLUDED_URL_PATTERN = re.compile(
def index_binary(resource, response):
logging.debug(
"Indexing binary for: %s",
- gus.lib.logging.strip_control_chars(resource.indexable_url),
+ gus.lib.logging.strip_control_chars(resource.normalized_url),
)
doc = {
- "url": resource.indexable_url,
+ "url": resource.normalized_url,
"fetchable_url": resource.fetchable_url,
- "normalized_url": resource.normalized_url,
"domain": resource.normalized_host,
"port": resource.urlsplit.port or 1965,
"content_type": response.content_type,
@@ -59,7 +58,7 @@ def index_binary(resource, response):
"last_status" : response.status,
"last_stats_message" : response.error_message,
}
- existing_page = Page.get_or_none(url=resource.indexable_url)
+ existing_page = Page.get_or_none(url=resource.normalized_url)
if existing_page:
doc["id"] = existing_page.id
existing_change_frequency = (
@@ -73,7 +72,7 @@ def index_binary(resource, response):
try:
page.save()
except:
- logging.error("Error adding page: %s", gus.lib.logging.strip_control_chars(resource.indexable_url))
+ logging.error("Error adding page: %s", gus.lib.logging.strip_control_chars(resource.normalized_url))
return page
@@ -81,13 +80,12 @@ def index_binary(resource, response):
def index_redirect(resource, response):
logging.debug(
"Indexing redirect for: %s",
- gus.lib.logging.strip_control_chars(resource.indexable_url),
+ gus.lib.logging.strip_control_chars(resource.normalized_url),
)
doc = {
- "url": resource.indexable_url,
+ "url": resource.normalized_url,
"fetchable_url": resource.fetchable_url,
- "normalized_url": resource.normalized_url,
"domain": resource.normalized_host,
"port": resource.urlsplit.port or 1965,
"change_frequency": resource.get_default_change_frequency("redirect"),
@@ -96,7 +94,7 @@ def index_redirect(resource, response):
"last_status" : response.status,
"last_stats_message" : response.error_message,
}
- existing_page = Page.get_or_none(url=resource.indexable_url)
+ existing_page = Page.get_or_none(url=resource.normalized_url)
if existing_page:
doc["id"] = existing_page.id
existing_change_frequency = (
@@ -110,7 +108,7 @@ def index_redirect(resource, response):
try:
page.save()
except:
- logging.error("Error adding page: %s", gus.lib.logging.strip_control_chars(resource.indexable_url))
+ logging.error("Error adding page: %s", gus.lib.logging.strip_control_chars(resource.normalized_url))
return page
@@ -119,18 +117,16 @@ def index_error(resource, is_temporary, response):
category = "temp_error" if is_temporary else "perm_error"
default_change_frequency = resource.get_default_change_frequency(category)
doc = {
- "url": resource.indexable_url,
+ "url": resource.normalized_url,
"fetchable_url": resource.fetchable_url,
- "normalized_url": resource.normalized_url,
"domain": resource.normalized_host,
"port": resource.urlsplit.port or 1965,
"change_frequency": default_change_frequency,
"last_crawl_at": datetime.utcnow(),
- "last_crawl_success_at": datetime.utcnow(),
- "last_status" : None,
- "last_status_message" : None,
+ "last_status" : None if response is None else response.status,
+ "last_status_message" : None if response is None else response.error_message
}
- existing_page = Page.get_or_none(url=resource.indexable_url)
+ existing_page = Page.get_or_none(url=resource.normalized_url)
if existing_page:
doc["id"] = existing_page.id
existing_change_frequency = (
@@ -143,7 +139,7 @@ def index_error(resource, is_temporary, response):
try:
page.save()
except:
- logging.error("Error adding page: %s", gus.lib.logging.strip_control_chars(resource.indexable_url))
+ logging.error("Error adding page: %s", gus.lib.logging.strip_control_chars(resource.normalized_url))
return page
@@ -151,13 +147,12 @@ def index_error(resource, is_temporary, response):
def index_prompt(resource, response):
logging.debug(
"Indexing prompt for: %s",
- gus.lib.logging.strip_control_chars(resource.indexable_url),
+ gus.lib.logging.strip_control_chars(resource.normalized_url),
)
doc = {
- "url": resource.indexable_url,
+ "url": resource.normalized_url,
"fetchable_url": resource.fetchable_url,
- "normalized_url": resource.normalized_url,
"domain": resource.normalized_host,
"port": resource.urlsplit.port or 1965,
"content_type": "input",
@@ -170,7 +165,7 @@ def index_prompt(resource, response):
"last_status" : response.status,
"last_stats_message" : response.error_message,
}
- existing_page = Page.get_or_none(url=resource.indexable_url)
+ existing_page = Page.get_or_none(url=resource.normalized_url)
if existing_page:
doc["id"] = existing_page.id
existing_change_frequency = (
@@ -184,7 +179,7 @@ def index_prompt(resource, response):
try:
page.save()
except:
- logging.error("Error adding page: %s", gus.lib.logging.strip_control_chars(resource.indexable_url))
+ logging.error("Error adding page: %s", gus.lib.logging.strip_control_chars(resource.normalized_url))
return page
@@ -192,13 +187,12 @@ def index_prompt(resource, response):
def index_content(resource, response):
logging.debug(
"Indexing content for: %s",
- gus.lib.logging.strip_control_chars(resource.indexable_url),
+ gus.lib.logging.strip_control_chars(resource.normalized_url),
)
doc = {
- "url": resource.indexable_url,
+ "url": resource.normalized_url,
"fetchable_url": resource.fetchable_url,
- "normalized_url": resource.normalized_url,
"domain": resource.normalized_host,
"port": resource.urlsplit.port or 1965,
"content_type": response.content_type,
@@ -213,7 +207,7 @@ }
if response.content_type == "text/gemini":
doc["lang"] = (response.lang or "none",)
- existing_page = Page.get_or_none(url=resource.indexable_url)
+ existing_page = Page.get_or_none(url=resource.normalized_url)
is_different = False
if existing_page:
doc["id"] = existing_page.id
@@ -235,7 +229,7 @@
try:
page.save()
except:
- logging.error("Error adding page: %s", gus.lib.logging.strip_control_chars(resource.indexable_url))
+ logging.error("Error adding page: %s", gus.lib.logging.strip_control_chars(resource.normalized_url))
return page, is_different
@@ -257,8 +251,10 @@ def should_skip(resource):
def index_links(from_resource, contained_resources):
- from_page, created = Page.get_or_create(url=from_resource.indexable_url)
-
+ from_page, created = Page.get_or_create(url=from_resource.normalized_url)
+
+ ## first delete all links that this page as had before
+ ## than add new links
try:
Link.delete().where(Link.from_page == from_page).execute()
except:
@@ -267,10 +263,10 @@ def index_links(from_resource, contained_resources):
for cr in contained_resources:
if should_skip(cr):
continue
- to_page = Page.get_or_none(url=cr.indexable_url)
+ to_page = Page.get_or_none(url=cr.normalized_url)
if not to_page:
to_page = Page.create(
- url=cr.indexable_url,
+ url=cr.normalized_url,
fetchable_url=cr.fetchable_url,
domain=cr.normalized_host,
port=cr.urlsplit.port or 1965,
@@ -331,7 +327,7 @@ def crawl_page(
)
return
if should_check_if_expired:
- existing_page = Page.get_or_none(url=gr.indexable_url)
+ existing_page = Page.get_or_none(url=gr.normalized_url)
if existing_page and existing_page.change_frequency is not None:
most_recent_crawl = existing_page.last_crawl_at
if most_recent_crawl and datetime.now() < most_recent_crawl + timedelta(
@@ -393,7 +389,7 @@ def crawl_page(
if response is None:
# problem before getting a response
logging.warn("Failed to fetch: %s", gus.lib.logging.strip_control_chars(url))
- page = index_error(gr, True, response)
+ page = index_error(gr, True, None)
failure_count[gr.normalized_host] = failure_count[gr.normalized_host] + 1 if gr.normalized_host in failure_count else 1
logging.debug("Failed request count for host %s is %d", gr.normalized_host, failure_count[gr.normalized_host])
diff --git a/gus/excludes.py b/gus/excludes.py
@@ -51,7 +51,7 @@ EXCLUDED_URL_PREFIXES = [
"gemini://runjimmyrunrunyoufuckerrun.com/tmp/",
"gemini://gemini.conman.org/boston/",
- # Internal
+ # Search providers
"gemini://gus.guru/search/",
"gemini://gus.guru/v/search/",
"gemini://gus.guru/search?",
@@ -59,10 +59,9 @@ EXCLUDED_URL_PREFIXES = [
"gemini://gus.guru/add-seed?",
"gemini://gus.guru/backlinks?",
"gemini://gus.guru/threads",
-
- # Houston
"gemini://houston.coder.town/search?",
"gemini://houston.coder.town/search/",
+ "gemini://marginalia.nu/search",
# Geddit
"gemini://geddit.pitr.ca/post?",
@@ -189,6 +188,7 @@ EXCLUDED_URL_PREFIXES = [
# full web proxy
"gemini://drewdevault.com/cgi-bin/web.sh?",
+ "gemini://gemiprox.pollux.casa/",
# killing crawl, I think maybe because it's too big
"gemini://gem.denarii.cloud/pichaindata.zip",
diff --git a/gus/lib/db_model.py b/gus/lib/db_model.py
@@ -31,7 +31,6 @@ class Page(Model):
url = TextField(unique=True, index=True)
fetchable_url = TextField(null=True)
- normalized_url = TextField(null=True)
domain = TextField(null=True)
port = IntegerField(null=True)
content_type = TextField(null=True)
diff --git a/gus/lib/index_statistics.py b/gus/lib/index_statistics.py
@@ -9,11 +9,11 @@ from gus.lib.db_model import Page
def compute_index_statistics(db):
page_count = len(Page.raw("""SELECT DISTINCT p.id
-FROM page AS p
-GROUP BY p.normalized_url""").dicts())
+FROM page AS p""").dicts())
domains_query = Page.raw("""SELECT DISTINCT p.domain, p.port
-FROM page AS p""")
+FROM page AS p
+WHERE last_crawl_success_at IS NOT NULL""")
domains = []
for d in domains_query.execute():
s = d.domain
@@ -31,15 +31,14 @@ FROM page AS p""")
content_type_frequencies = (Page.raw("""SELECT
p.content_type, count(p.content_type) as 'count'
FROM page AS p
-GROUP BY p.normalized_url, p.content_type
+GROUP BY p.content_type
ORDER BY 2 desc""").dicts())
charset_frequencies = (Page.raw("""SELECT
upper(p.charset), count(p.id) as 'count'
FROM page AS p
WHERE p.charset IS NOT NULL
GROUP BY upper(p.charset)
ORDER BY 2 desc""").dicts())
- #index_modification_time = datetime.fromisoformat(Page.select(fn.Max(last_crawl_at)).scalar())
- index_modification_time = datetime.utcnow()
+ index_modification_time = Page.select(fn.Max(Page.last_crawl_at)).scalar()
return {
"index_modification_time": index_modification_time,
@@ -82,7 +81,7 @@ def log_index_statistics(index_statistics, crawl_statistics=None):
def run_index_statistics():
index_statistics = compute_index_statistics("index")
log_index_statistics(index_statistics, None)
- # persist_index_statistics(index_statistics, "index-statistics.csv")
+ # persist_index_statistics(index_statistics, "index-statistics.csv")
def persist_statistics(index_statistics, crawl_statistics, was_destructive, filename):
diff --git a/gus/lib/search.py b/gus/lib/search.py
@@ -88,19 +88,11 @@ class Index:
return self._writer
def add_document(self, document):
- self._rolling_writer().add_document(**document)
+ self._rolling_writer().update_document(**document)
def delete_by_term(self, key, val):
# TODO delete_document
self._rolling_writer().delete_by_term(key, val, searcher=None)
- def indexed_urls(self):
- indexed_urls = []
- with self._index.reader() as reader:
- logging.debug("Loading list of known URLs from index")
- all_stored_fields = reader.all_stored_fields()
- indexed_urls = [f["fetchable_url"] for f in all_stored_fields]
- return indexed_urls
-
def parse_query(self, query):
return self._query_parser.parse(query)
diff --git a/infra/rebuild_index.sh b/infra/rebuild_index.sh
@@ -0,0 +1,8 @@
+cp -r /home/gus/index /home/gus/index.new
+#rm -rf /home/gus/index.new/MAIN*
+#rm -rf /home/gus/index.new/_MAIN*
+/home/gus/.poetry/bin/poetry run build_index -d
+rm -rf /home/gus/index.old
+rm -rf /home/gus/index.new/MAIN.tmp/
+mv /home/gus/index /home/gus/index.old
+mv /home/gus/index.new /home/gus/index
diff --git a/infra/update_index.sh b/infra/update_index.sh
@@ -1,6 +0,0 @@
-cp -r /home/gus/index /home/gus/index.new
-/home/gus/.poetry/bin/poetry run build_index
-rm -rf /home/gus/index.old
-rm -rf /home/gus/index.new/MAIN.tmp/
-mv /home/gus/index /home/gus/index.old
-mv /home/gus/index.new /home/gus/index
diff --git a/serve/constants.py b/serve/constants.py
@@ -3,46 +3,3 @@ SEED_REQUEST_FILE = "seed-requests.txt"
STATISTICS_FILE = "statistics.csv"
DB_FILENAME = "gus.sqlite"
-QUOTE_BANK = [
- {
- "quote": "Isn’t it enough to see that a garden is beautiful without having to believe that there are fairies at the bottom of it too?",
- "author": "Douglas Adams",
- },
- {
- "quote": "The ships hung in the sky in much the same way that bricks don’t.",
- "author": "Douglas Adams",
- },
- {
- "quote": "I went to the doctor and all he did was suck blood from my neck. Do not go see Dr. Acula.",
- "author": "Mitch Hedberg",
- },
- {
- "quote": "I'm sick of people saying that age is a number... when it is clearly a word.",
- "author": "Mitch Hedberg",
- },
- {
- "quote": "Some people think this is paranoia, but it isn't. Paranoids only think everyone is out to get them. Wizards know it.",
- "author": "Terry Pratchett",
- },
- {
- "quote": "If it looks like a duck and sounds like a duck, it could be a really ugly swan.",
- "author": "Timothy Radman",
- },
- {
- "quote": "If I cease searching, then, woe is me, I am lost. That is how I look at it - keep going, keep going come what may.",
- "author": "Vincent Van Gogh",
- },
- {
- "quote": "Nobody ever figures out what life is all about, and it doesn't matter. Explore the world. Nearly everything is really interesting if you go into it deeply enough.",
- "author": "Richard Feynman",
- },
- {
- "quote": "I would rather have questions that can't be answered than answers that can't be questioned.",
- "author": "Richard Feynman",
- },
- {
- "quote": "The truth will set you free. But not until it is finished with you.",
- "author": "David Foster Wallace",
- },
- {"quote": "Jazz isn't dead. It just smells funny.", "author": "Frank Zappa",},
-]
diff --git a/serve/models.py b/serve/models.py
@@ -52,15 +52,15 @@ class GUS:
if not resource.is_valid:
return [], []
- u = resource.indexable_url.rstrip("/")
+ u = resource.normalized_url.rstrip("/")
backlinks_query = Page.raw(
"""SELECT p_from.url, l.is_cross_host_like
FROM page AS p_from
JOIN link as l ON l.from_page_id == p_from.id
JOIN page as p_to ON p_to.id == l.to_page_id
WHERE p_to.url IN (?, ?)
-AND p_from.normalized_url != ?
-GROUP BY p_from.normalized_url
+AND p_from.url != ?
+GROUP BY p_from.url
ORDER BY l.is_cross_host_like, p_from.url ASC""",
u,
f"{u}/",
@@ -174,14 +174,12 @@ ORDER BY t.thread_length DESC, t.updated_at DESC, t.id ASC, tp.address ASC"""
feeds_query = Page.raw(
"""SELECT DISTINCT p.*
FROM page AS p
-JOIN indexable_crawl AS c
-ON c.page_id == p.id
-WHERE p.url LIKE '%atom.xml'
+WHERE (p.url LIKE '%atom.xml'
OR p.url LIKE '%feed.xml'
OR p.url LIKE '%.rss'
OR p.url LIKE '%.atom'
-OR p.content_type IN ('application/atom+xml', 'application/rss+xml')
-"""
+OR p.content_type IN ('application/atom+xml', 'application/rss+xml'))
+AND p.last_crawl_success_at IS NOT NULL"""
)
return feeds_query.execute()
@@ -201,12 +199,8 @@ OR p.content_type IN ('application/atom+xml', 'application/rss+xml')
def get_newest_pages(self):
newest_pages_query = Page.raw(
- """SELECT p.url, p.fetchable_url, MIN(c.timestamp) AS first_seen
+ """SELECT p.url, p.fetchable_url AS first_seen
FROM page as p
-JOIN indexable_crawl AS ic
-ON ic.page_id == p.id
-JOIN crawl AS c
-ON c.page_id == p.id
GROUP BY p.url
ORDER BY first_seen DESC
LIMIT 50
diff --git a/serve/templates/about.gmi b/serve/templates/about.gmi
@@ -8,8 +8,6 @@
geminispace.info is a search engine for content served over the Gemini Protocol.
geminispace.info is powered by GUS, an open-source crawler & search engine made by Natalie Pendragon and contributors.
=> https://natpen.net/code/gus/ The source code of GUS is publicly available.
-geminispace.info is NOT a fork, all changes in regard to crawling, indexing and frontend are meant to be upstreamed if appropiate.
-
### index updates
Index updates run currently every three day and happen mostly unattended.
diff --git a/serve/templates/documentation/indexing.gmi b/serve/templates/documentation/indexing.gmi
@@ -16,10 +16,6 @@ Textual pages over 1MB in size will not be indexed.
Please note that GUS' indexing has provisions for manually excluding content from it, which maintainers will typically use to exclude pages and domains that cause issues with index relevance or crawl success. GUS ends up crawling weird protocol experiments, proofs of concepts, and whatever other bizarre bits of technical creativity folks put up in Geminispace, so it is a continual effort to keep the index healthy. Please don't take it personally if your content ends up excluded, and I promise we are continually working to make GUS indexing more resilient and scalable!
-### How often does GUS index?
-
-GUS currently tends to update its index a few times per month. The last updated date at the bottom of each page will tell you the last time this happened.
-
### Controlling what GUS indexes with a robots.txt
To control crawling of your site, you can use a robots.txt file, Place it in your capsule's root directory such that a request for "robots.txt" will fetch it. It should be returned with a mimetype of `text/plain`.
@@ -30,11 +26,11 @@ GUS obeys User-agent of "indexer" and "*".
You can identify the GUS by looking for any requests to your site made by the following IP addresses:
-* IPv6: 2604:a880:400:d0::17e4:b001
-* IPv4: 198.199.84.116
+* IPv6: 2a03:4000:53:f82:b8f1:ff:fe15:5ec9
+* IPv4: 202.61.246.155
### Does GUS keep my content forever?
-No. After repeated failed attempts to connect to a page (e.g., because it moved, or because the capsule got taken down, or because of a server error on your host), GUS will eventually invalidate that page in its index, thus removing it from search results.
+No. After repeated failed attempts to connect to a page (e.g., because it moved, or because the capsule got taken down, or because of a server error on your host), GUS will invalidate that page after 1 month of unavailability in its index, thus removing it from search results.
{% include 'fragments/footer.gmi' %}
diff --git a/serve/templates/fragments/footer.gmi b/serve/templates/fragments/footer.gmi
@@ -1,3 +1,5 @@
+
=> /add-seed missing results? add your capsule to geminispace.info
Index updated on: {{ index_modification_time|datetimeformat }}
+
diff --git a/serve/templates/index.gmi b/serve/templates/index.gmi
@@ -6,8 +6,10 @@
=> /statistics geminispace.info Statistics
=> /known-hosts Known Gemini Hosts
=> /known-feeds Known Gemini Feeds
+{% if (false) %}
=> /newest-hosts Newest Gemini Hosts
=> /newest-pages Newest Gemini Pages
+{% endif %}
## Help and Documentation
@@ -16,5 +18,4 @@
{% include 'fragments/documentation-toc.gmi' %}
-
{% include 'fragments/footer.gmi' %}
diff --git a/serve/templates/known_hosts.gmi b/serve/templates/known_hosts.gmi
@@ -4,8 +4,9 @@
## Known Gemini Hosts
Below are the hosts in Geminispace of which geminispace.info is aware. Note that this list is auto-generated from the index, so if your host is not showing up here, it also won't have its content represented in search results! If your server is missing, please use the link at the bottom of this page to submit a crawl request of your Gemini URL, after which your server should start showing up.
-
+{% if (false) %}
=> /newest-hosts Show me the newest 10 hosts!
+{% endif %}
{% for host in known_hosts %}
{{ "=> gemini://{} {}".format(host, host) }}
diff --git a/serve/templates/news.gmi b/serve/templates/news.gmi
@@ -2,6 +2,10 @@
## News
+### 2021-07-11
+The revamped data store seems to work fine so far.
+Unfortunately i had to disable the "newest hosts" and "newest pages" sites as the data is currently not available. I'll add that back again later, but before this i'd like to have the cleanup mechanismn implemented to get rid of old data from capsules that are no longer available.
+
### 2021-07-10
If finally managed to analyze the index process. In the end it turned out to be an issue when calculating the backlink counters and with an adapted query indexing is fast again. Obviously i was horribly wrong all the time blaming the slow vps.
diff --git a/serve/views.py b/serve/views.py
@@ -1,6 +1,5 @@
import math
import os
-import random
from datetime import datetime
from urllib.parse import quote, unquote
@@ -91,8 +90,7 @@ def statistics(request):
body = render_template(
"statistics.gmi",
statistics=gus.statistics,
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -102,8 +100,7 @@ def statistics(request):
body = render_template(
"statistics_historical_overall.gmi",
statistics_historical_overall=gus.statistics_historical_overall,
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -114,8 +111,7 @@ def known_hosts(request):
"known_hosts.gmi",
# TODO: remove this `sorted` after the next index generation
known_hosts=sorted(gus.statistics["domains"]),
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -125,8 +121,7 @@ def newest_hosts(request):
body = render_template(
"newest_hosts.gmi",
newest_hosts=gus.get_newest_hosts(),
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -136,8 +131,7 @@ def newest_pages(request):
body = render_template(
"newest_pages.gmi",
newest_pages=gus.get_newest_pages(),
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -147,9 +141,9 @@ def known_feeds(request):
body = render_template(
"known_feeds.gmi",
known_feeds=gus.get_feeds(),
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
+
return Response(Status.SUCCESS, "text/gemini", body)
@@ -157,8 +151,7 @@ def index(request):
body = render_template(
"index.gmi",
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -167,8 +160,7 @@ def index(request):
body = render_template(
"about.gmi",
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -177,8 +169,7 @@ def documentation_searching(request):
body = render_template(
"documentation/searching.gmi",
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -187,8 +178,7 @@ def documentation_indexing(request):
body = render_template(
"documentation/indexing.gmi",
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -197,8 +187,7 @@ def documentation_backlinks(request):
body = render_template(
"documentation/backlinks.gmi",
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -207,8 +196,7 @@ def index(request):
body = render_template(
"news.gmi",
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
@@ -235,8 +223,7 @@ def search(request):
results=results,
current_page=current_page,
num_pages=num_pages,
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
else:
search_suggestions = gus.get_search_suggestions(request.query)
@@ -244,8 +231,7 @@
"search_suggestions.gmi",
query=request.query,
search_suggestions=search_suggestions,
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return body
@@ -282,8 +268,7 @@ def backlinks(request):
url=url,
internal_backlinks=internal_backlinks,
external_backlinks=external_backlinks,
- index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
+ index_modification_time=gus.statistics["index_modification_time"]
)
return Response(Status.SUCCESS, "text/gemini", body)
else:
@@ -299,6 +284,5 @@ def threads(request):
threads=threads,
sort=sort,
index_modification_time=gus.statistics["index_modification_time"],
- quote=random.choice(constants.QUOTE_BANK),
)
return Response(Status.SUCCESS, "text/gemini", body)