Update help documentation for Yioop 7.0

Chris Pollett [2020-06-30]
Update help documentation for Yioop 7.0
Update README.txt, INSTALL.txt
Update composer.json for new PHP requirements
Lower MEMORY_FILL_FACTOR to reduce crashes
Tweak CSS for help to make it work again under mobile.
Tweak help.js to make the case of no wiki page for a help item
work again.
Make proxy crawling compatible with spam site detection.
Allow crawling through proxies that require a username and password
(see the sketch below).
Notice suppression in PdfProcessor.
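
The new proxy support accepts lines in the Crawl via Proxies textarea of the
form address:port, address:port:type, or address:port:type:username:password.
Below is a minimal sketch (not Yioop code; the variable names and example
values are made up for illustration) of how such a line maps onto cURL
options, mirroring the FetchUrl.php change in this commit:

 <?php
 // Sketch: split one configured proxy line and set the matching cURL options.
 $proxy_line = "127.0.0.1:1080:socks5_hostname:someuser:somepass"; // example values
 $parts = explode(":", $proxy_line);
 $ch = curl_init("https://www.example.com/");
 curl_setopt($ch, CURLOPT_PROXY, $parts[0] . ":" . (!empty($parts[1]) ? $parts[1] : "80"));
 // The real code maps $parts[2] to a CURLPROXY_* constant; socks5_hostname assumed here.
 curl_setopt($ch, CURLOPT_PROXYTYPE, CURLPROXY_SOCKS5_HOSTNAME);
 if (!empty($parts[4])) { // a username and password were supplied
     curl_setopt($ch, CURLOPT_PROXYUSERPWD, $parts[3] . ":" . $parts[4]);
     curl_setopt($ch, CURLOPT_PROXYAUTH, CURLAUTH_BASIC);
 }
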
Filename
INSTALL.txt
README.txt
composer.json
composer.lock
src/configs/Config.php
src/configs/PublicHelpPages.php
src/css/search.css
src/data/public_default.db
src/executables/MediaUpdater.php
src/executables/QueueServer.php
src/library/FetchUrl.php
src/library/StochasticTermSegmenter.php
src/library/processors/PdfProcessor.php
src/locale/ar/configure.ini
src/locale/bn/configure.ini
src/locale/de/configure.ini
src/locale/en_US/configure.ini
src/locale/es/configure.ini
src/locale/fa/configure.ini
src/locale/fr_FR/configure.ini
src/locale/he/configure.ini
src/locale/hi/configure.ini
src/locale/id/configure.ini
src/locale/it/configure.ini
src/locale/ja/configure.ini
src/locale/kn/configure.ini
src/locale/ko/configure.ini
src/locale/nl/configure.ini
src/locale/pl/configure.ini
src/locale/pt/configure.ini
src/locale/ru/configure.ini
src/locale/te/configure.ini
src/locale/th/configure.ini
src/locale/tl/configure.ini
src/locale/tr/configure.ini
src/locale/vi_VN/configure.ini
src/locale/zh_CN/configure.ini
src/models/MachineModel.php
src/models/PhraseModel.php
src/models/SourceModel.php
src/scripts/help.js
src/views/AdminView.php
src/views/MachinestatusView.php
src/views/elements/LanguageElement.php
src/views/elements/MediajobsElement.php
src/views/elements/ScrapersElement.php
src/views/elements/SearchsourcesElement.php
tests/UtilityTest.php
diff --git a/INSTALL.txt b/INSTALL.txt
index 5a3729037..fd31eb219 100755
--- a/INSTALL.txt
+++ b/INSTALL.txt
@@ -1,7 +1,7 @@
 SeekQuarry/Yioop --
 Open Source Pure PHP Search Engine, Crawler, and Indexer

-Copyright (C) 2009 - 2019  Chris Pollett chris@pollett.org
+Copyright (C) 2009 - 2020  Chris Pollett chris@pollett.org

 http://www.seekquarry.com/

diff --git a/README.txt b/README.txt
index dd5f55e14..35410ced8 100755
--- a/README.txt
+++ b/README.txt
@@ -1,7 +1,7 @@
 SeekQuarry/Yioop --
 Open Source Pure PHP Search Engine, Crawler, and Indexer

-Copyright (C) 2009 - 2019  Chris Pollett chris@pollett.org
+Copyright (C) 2009 - 2020  Chris Pollett chris@pollett.org

 http://www.seekquarry.com/

@@ -45,20 +45,21 @@ http://www.seekquarry.com/

 Requirements
 ------------
-The Yioop search engine requires PHP 5.6.
+The Yioop search engine requires PHP 7.1.

 Credits
 ------
 The source code is mainly due to Chris Pollett.
-Other contributors include: Mangesh Dahale, Ravi Dhillon, Priya Gangaraju,
-Akshat Kukreti, Pooja Mishra, Sreenidhi Pundi Muralidharan,
-Nakul Natu, Shailesh Padave, Vijaya Pamidi, Snigdha Parvatneni,
-Akash Patel, Vijeth Patil, Mallika Perepa, Tarun Pepira,
-Eswara Rajesh Pinapala, Tamayee Potluri, Shawn Tice, Pushkar Umaranikar,
-Sandhya Vissapragada. Several people helped with localization:
-My wife, Mary Pollett, Jonathan Ben-David, Ismail.B, Andrea Brunetti,
-Thanh Bui, Sujata Dongre, Animesh Dutta, Aida Khosroshahi, Radha Kotipalli,
-Youn Kim, Akshat Kukreti, Chao-Hsin Shih, Ahmed Kamel Taha, and Sugi Widjaja.
+Other contributors include: Charles Bocage, Timothy Chow, Mangesh Dahale,
+Ravi Dhillon, Priya Gangaraju, Yangcha Ho, Akshat Kukreti, Pooja Mishra,
+Sreenidhi Pundi Muralidharan, Nakul Natu, Shailesh Padave, Vijaya Pamidi,
+Snigdha Parvatneni, Akash Patel, Vijeth Patil, Mallika Perepa, Tarun Pepira,
+Eswara Rajesh Pinapala, Tamayee Potluri, Forrest Sun, Shawn Tice,
+Pushkar Umaranikar, Sandhya Vissapragada. Several people helped with
+localization: My wife, Mary Pollett, Jonathan Ben-David, Ismail.B,
+Andrea Brunetti, Thanh Bui, Sujata Dongre, Animesh Dutta, Aida Khosroshahi,
+Radha Kotipalli, Youn Kim, Akshat Kukreti, Chao-Hsin Shih, Ahmed Kamel Taha,
+and Sugi Widjaja.

 Installation
 -------------
diff --git a/composer.json b/composer.json
index addfb463c..e47cc015a 100644
--- a/composer.json
+++ b/composer.json
@@ -11,7 +11,7 @@
     ],
     "minimum-stability": "stable",
     "require": {
-        "php": ">=5.4.0",
+        "php": ">=7.1.0",
         "ext-dom": "*",
         "ext-gd": "*",
         "ext-json": "*",
diff --git a/composer.lock b/composer.lock
index 8f805d62b..45b5ef029 100644
--- a/composer.lock
+++ b/composer.lock
@@ -1,11 +1,10 @@
 {
     "_readme": [
         "This file locks the dependencies of your project to a known state",
-        "Read more about it at https://getcomposer.org/doc/01-basic-usage.md#composer-lock-the-lock-file",
+        "Read more about it at https://getcomposer.org/doc/01-basic-usage.md#installing-dependencies",
         "This file is @generated automatically"
     ],
-    "hash": "a9a653eb7781a772e1ab492dd4eee5e8",
-    "content-hash": "9a00ca22dcc4a39f7ecfbdf2444131f2",
+    "content-hash": "1eb516c6c8d355f642e2c09da6402afe",
     "packages": [],
     "packages-dev": [],
     "aliases": [],
@@ -14,7 +13,7 @@
     "prefer-stable": false,
     "prefer-lowest": false,
     "platform": {
-        "php": ">=5.4.0",
+        "php": ">=7.1.0",
         "ext-dom": "*",
         "ext-gd": "*",
         "ext-json": "*",
diff --git a/src/configs/Config.php b/src/configs/Config.php
index 5c2dad60d..ac858820b 100755
--- a/src/configs/Config.php
+++ b/src/configs/Config.php
@@ -458,7 +458,7 @@ if (file_exists(WORK_DIRECTORY . PROFILE_FILE_NAME)) {
     /** @ignore */
     nsdefine('LOCALE_DIR', FALLBACK_LOCALE_DIR);
     /** @ignore */
-    nsdefine('LOG_DIR', BASE_DIR."/log");
+    nsdefine('LOG_DIR', BASE_DIR . "/log");
     nsdefine('NAME_SERVER', "http://localhost/");
     nsdefine('USER_AGENT_SHORT', "NeedsNameBot");
     nsdefine('DEFAULT_LOCALE', "en-US");
@@ -724,7 +724,7 @@ nsconddefine('TOKEN_TOOL_MEMORY_LIMIT', ceil(MEMORY_PROFILE/2) . "000M");
  *  (usually Fetcher or QueueServer) before action (such as switch shard)
  *  on current class (usually IndexArchiveBundle) is taken.
  */
-nsconddefine('MEMORY_FILL_FACTOR', 0.65);
+nsconddefine('MEMORY_FILL_FACTOR', 0.6);
 /**
  * bloom filters are used to keep track of which urls are visited,
  * this parameter determines up to how many
diff --git a/src/configs/PublicHelpPages.php b/src/configs/PublicHelpPages.php
index f814c0422..02b12a402 100644
--- a/src/configs/PublicHelpPages.php
+++ b/src/configs/PublicHelpPages.php
@@ -869,7 +869,7 @@ page_border=solid-border

 toc=true

-title=Account Registration
+title=Account+Registration

 author=

@@ -877,30 +877,27 @@ robots=

 description=

+alternative_path=
+
 page_header=

 page_footer=

-END_HEAD_VARSThe Account Registration field-set is used to control how user's can obtain accounts on a Yioop installation.
+sort=aname

-The dropdown at the start of this fieldset allows you to select one of four
-possibilities:
-* '''Disable Registration''', users cannot register themselves, only the root
-account can add users.
-When Disable Registration is selected, the Suggest A Url form and link on
-the tool.php page is disabled as well, for all other registration type this
-link is enabled.
-* '''No Activation''', user accounts are immediately activated once a user
-signs up.
-* '''Email Activation''', after registering, users must click on a link which
-comes in a separate email to activate their accounts.
-If Email Activation is chosen, then the reset of this field-set can be used
-to specify the email address that the email comes to the user. The checkbox Use
-PHP mail() function controls whether to use the mail function in PHP to send
-the mail, this only works if mail can be sent from the local machine.
-Alternatively, if this is not checked like in the image above, one can
-configure an outgoing SMTP server to send the email through.
-* '''Admin Activation''', after registering, an admin account must activate
+END_HEAD_VARSThe Account Registration field-set is used to control how users can obtain accounts on a Yioop installation. The dropdown at the start of this fieldset allows you to select one of four
+possibilities:
+* '''Disable Registration''', users cannot register themselves, only the root
+account can add users. When Disable Registration is selected, the Suggest A Url form and link on
+the tool.php page is disabled as well; for all other registration types this
+link is enabled.
+* '''No Activation''', user accounts are immediately activated once a user
+signs up.
+* '''Email Activation''', after registering, users must click on a link which
+comes in a separate email to activate their accounts. If Email Activation is chosen, then the rest of this field-set can be used to specify the email address that the email comes to the user. The checkbox Use
+PHP mail() function controls whether to use the mail function in PHP to send the mail, this only works if mail can be sent from the local machine. Alternatively, if this is not checked like in the image above, one can
+configure an outgoing SMTP server to send the email through.
+* '''Admin Activation''', after registering, an admin account (an account having the admin role) must activate
 the user before the user is allowed to use their account.
 EOD;
 $help_pages["en-US"]["Ad_Server"] = <<< EOD
@@ -1145,13 +1142,13 @@ page_footer=

 sort=aname

-END_HEAD_VARSThe Bot Configuration field-set is used to control whether user&#039;s of this Yioop instance can be chat bots.
-&lt;br/&gt;
-
-If enabled under &#039;&#039;&#039;Manage Accounts&#039;&#039;&#039; a Yioop user can declare themselves a chat bot and give a callback url.
-&lt;br/&gt;
-
-Suppose a chat bot user has a name user name, &#039;&#039;user1&#039;&#039;. If that chat bot user belongs to a group, and in an already existing thread, someone posts a follow up comment containing &#039;&#039;user1&#039;&#039;, then that message will be sent in a post field together with a bot_token field to the callback url. The response from the url will then be used in a response to the comment (if any).
+END_HEAD_VARSThe Bot Configuration field-set is used to control whether users of this Yioop instance can be chat bots.
+&lt;br/&gt;
+
+If enabled, under &#039;&#039;&#039;Manage Accounts&#039;&#039;&#039; a Yioop user can check a checkbox to declare themselves to be a chat bot. The chat bot user can then give a callback url where the bot functionality will be implemented.
+&lt;br/&gt;
+
+Suppose a chat bot user has the user name &#039;&#039;user1&#039;&#039;. If that chat bot user belongs to a group, and in an already existing thread, someone posts a follow up comment containing &#039;&#039;user1&#039;&#039;, then that message will be sent in a post field together with a bot_token field to the Bot user&#039;s callback url. The response from the url will then be used in a response to the comment (if any).
 EOD;
 $help_pages["en-US"]["Bot_Story_Patterns"] = <<< EOD
 page_type=standard
@@ -1264,7 +1261,7 @@ page_border=solid-border

 toc=true

-title=Captcha Type
+title=Captcha+Type

 author=

@@ -1278,22 +1275,20 @@ page_header=

 page_footer=

-END_HEAD_VARSThe Captcha Type field set controls what kind of
-[[https://en.wikipedia.org/wiki/CAPTCHA|captcha]] will be used during account
-registration, password recovery, and if a user wants to suggest a url. The choices for captcha are:
-* &#039;&#039;&#039;Text Captcha&#039;&#039;&#039;, the user has to select from a series of dropdown answers
-to questions of the form: &#039;&#039;Which in the following list is the most/largest/etc?
-or Which is the following list is the least/smallest/etc?; &#039;&#039;
-* &#039;&#039;&#039;Graphic Captcha&#039;&#039;&#039;, the user needs to enter a sequence of characters from
-a distorted image;
-* &#039;&#039;&#039;Hash captcha&#039;&#039;&#039;, the user&#039;s browser (the user doesn&#039;t need to do anything)
-needs to extend a random string with additional characters to get a string
-whose hash begins with a certain lead set of characters.
+sort=aname

-Of these, Hash Captcha is probably the least intrusive but requires
-Javascript and might run slowly on older browsers. A text captcha might be used
-to test domain expertise of the people who are registering for an account.
-Finally, the graphic captcha is probably the one people are most familiar with.
+END_HEAD_VARSThe Captcha Type field set controls what kind of
+[[https://en.wikipedia.org/wiki/CAPTCHA|captcha]] will be used during account
+registration, password recovery, and if a user wants to suggest a url. The choices for captcha are:
+* &#039;&#039;&#039;Hash captcha&#039;&#039;&#039;, the user&#039;s browser (the user doesn&#039;t need to do anything)
+needs to extend a random string with additional characters to get a string
+whose hash begins with a certain lead set of characters.
+* &#039;&#039;&#039;Graphic Captcha&#039;&#039;&#039;, the user needs to enter a sequence of characters from
+a distorted image;
+
+Of these, Hash Captcha is probably the least intrusive but requires
+Javascript and might run slowly on older browsers.
+The graphic captcha is probably the one people are most familiar with.
 EOD;
 $help_pages["en-US"]["Changing_the_Classifier_Label"] = <<< EOD
 page_type=standard
@@ -1322,6 +1317,48 @@ END_HEAD_VARSThe label of a classifier determines what meta-words will be added

 If the label is foo, and the foo classifier is used in a crawl, then pages which have the foo property
 will have the meta-word class:foo added to the list of words that are indexed.
+EOD;
+$help_pages["en-US"]["Configure_Media_Jobs"] = <<< EOD
+page_type=standard
+
+page_alias=
+
+page_border=solid-border
+
+toc=true
+
+title=
+
+author=
+
+robots=
+
+description=
+
+alternative_path=
+
+page_header=
+
+page_footer=
+
+sort=aname
+
+END_HEAD_VARSThe &#039;&#039;&#039;Configure Media Jobs&#039;&#039;&#039; activity lets one specify which media jobs should be run and how they should be run. Media jobs are periodic jobs which are run by a Yioop installation.&lt;br&gt;&lt;br&gt;
+
+
+The &#039;&#039;&#039;Mode&#039;&#039;&#039; toggle lets one choose if the media updater is only run on the name server or if it is also run on all of the machines associated with a name server (distributed mode). The former is easier to manage; the latter can improve the performance of some media updater jobs such as video conversion and news feed download. Even in distributed mode some jobs will still only run on the name server if they weren&#039;t programmed to take advantage of the distributed setting.&lt;br&gt;&lt;br&gt;
+
+
+The &#039;&#039;&#039;Jobs List&#039;&#039;&#039; table contains a list of all jobs that either came with the Yioop software or were programmed by someone and added to the WORK_DIRECTORY/app/library/media_jobs folder (the folder does not exist unless created by that someone). Next to each job is an On/Off dropdown used to control if the job will be run when the MediaUpdater is running. Below is a brief description of the jobs that come with the Yioop software:
+
+* &#039;&#039;&#039;Analytics&#039;&#039;&#039; computes hourly, daily, monthly, etc. statistics for search queries and group feed, thread, and page views. It is also used to compute statistics about the kinds of pages downloaded during a crawl.
+* &#039;&#039;&#039;BulkEmail&#039;&#039;&#039; is used to send email notifications from Yioop (registration, thread posts, etc.) when &#039;&#039;Send Mail From Media Updater&#039;&#039; is checked in the &#039;&#039;&#039;Server Settings&#039;&#039;&#039; activity.
+* &#039;&#039;&#039;FeedsUpdate&#039;&#039;&#039; is used to periodically download feed sources (rss, json, html, regex) to be included in the IndexDataFeed bundle used for news, sports, etc. feeds. It also computes trending word statistics for these feeds.
+* &#039;&#039;&#039;Recommendation&#039;&#039;&#039; is used to compute group and thread suggestions for users.
+* &#039;&#039;&#039;TrendingHighlights&#039;&#039;&#039; is used to update trending value search sources (values on a web page that change frequently that a user wants to track) and to compute hourly, daily, monthly, etc. statistics for them. It also computes the landing page highlights for trending term and feed subsearches that are being used for highlights.
+* &#039;&#039;&#039;VideoConvert&#039;&#039;&#039; is used to convert videos that have been uploaded to wiki pages into mp4 videos that work in all browsers.
+* &#039;&#039;&#039;WikiMedia&#039;&#039;&#039; is used to download feed podcasts to group wiki pages.
+
 EOD;
 $help_pages["en-US"]["Crawl_Mixes"] = <<< EOD
 page_type=standard
@@ -1414,11 +1451,12 @@ sort=aname

 END_HEAD_VARSThe &#039;&#039;&#039;Crawl Robot Set-up&#039;&#039;&#039; fieldset is used to provide websites that you crawl with information about who is crawling them.

-*The field &#039;&#039;&#039;Crawl Robot Name&#039;&#039;&#039; is used to say part of the USER-AGENT. It has the format:
-&lt;br&gt;
-Mozilla/5.0 (compatible; NAME_FROM_THIS_FIELD; YOUR_SITES_URL/bot)
-&lt;br&gt;
-The value set will be common for all fetcher traffic from the same queue server on site when downloading webpages. If you are doing crawls using multiple queue servers you should give the same value to each queue server. The value of YOUR_SITES_URL comes from the Server Settings - Name Server URL field.
+*The field &#039;&#039;&#039;Crawl Robot Name&#039;&#039;&#039; is used to set part of the USER-AGENT header sent by your robot. It has the format:&lt;br&gt;
+&lt;code&gt;
+ Mozilla/5.0 (compatible; NAME_FROM_THIS_FIELD; YOUR_SITES_URL/bot)
+&lt;/code&gt;&lt;br&gt;
+The value sent will be common to all fetcher traffic from the same queue server on the site when downloading webpages.&lt;br&gt;
+If you are doing crawls using multiple queue servers you should give the same value to each queue server. The value of YOUR_SITES_URL comes from the Server Settings - Name Server URL field.
 *The &#039;&#039;&#039;Robot Instance&#039;&#039;&#039; field is used for web communication internal to a single yioop instance to help identify which queue server or fetcher under that queue server was involved. This string should be unique for each queue server in your Yioop set-up. The value of this string is written when logging requests between fetchers and queue servers and can be helpful in debugging.
 *The &#039;&#039;&#039;Robot Description&#039;&#039;&#039; field is used to specify the Public bot wiki page. This page can also be accessed and edited under Manage Groups by clicking on the wiki link for the Public group and then editing its Bot page. This wiki page is what&#039;s display when someone goes to the URL:&lt;br&gt;
 YOUR_SITES_URL/bot
@@ -1455,10 +1493,10 @@ END_HEAD_VARS&#039;&#039;This form appears when the Group Name is available to c
 ----

 &#039;&#039;&#039;Name&#039;&#039;&#039;
-* is used to specify the name of the new Group.
+* Is used to specify the name of the new Group.

 &#039;&#039;&#039;Register&#039;&#039;&#039;
-* says how other users are allowed to join the group:
+* Says how other users are allowed to join the group:
 * &lt;u&gt;No One&lt;/u&gt; means no other user can join the group (you can still invite
 other users).
 * &lt;u&gt;By Request&lt;/u&gt; means that other users can request the group owner to join
@@ -1467,7 +1505,7 @@ the group.


 &#039;&#039;&#039;Access&#039;&#039;&#039;
-* controls how users who belong/subscribe to a group
+* Controls how users who belong/subscribe to a group
 other than the owner can access that group.
 * &lt;u&gt;No Read&lt;/u&gt; means that a non-owner member of the group cannot read or
 write the group news feed and cannot read the group wiki.
@@ -1492,7 +1530,7 @@ wiki pages for the group&#039;s wiki.
 * Specifies How long the posts should be kept.

 &#039;&#039;&#039;Encryption&#039;&#039;&#039;
-* Whether the posts in this group should be encrypted on the server.
+* Says whether the posts in this group should be encrypted on the server.
 It does not enable encryption of wiki pages or media uploaded to a group.
 Enabling encryption means that posts will no longer be searchable. Once
 you choose a group as encrypted, you are not able to change it to be unencrypted.
@@ -1507,30 +1545,47 @@ EOD;
 $help_pages["en-US"]["Database_Setup"] = <<< EOD
 page_type=standard

+page_alias=
+
 page_border=solid-border

-title=Database Setup
+toc=true
+
+title=Database+Setup
+
+author=
+
+robots=
+
+description=
+
+alternative_path=

-END_HEAD_VARSThe database is used to store information about what users are
-allowed to use the admin panel and what activities and roles these users have.
-* The Database Set-up field-set is used to specify what database management
-system should be used, how it should be connected to, and what user name and
-password should be used for the connection.
+page_header=

-* Supported Databases
-** PDO (PHP&#039;s generic DBMS interface).
-** Sqlite3 Database.
-** Mysql Database.
+page_footer=

-* Unlike many database systems, if an sqlite3 database is being used then the
-connection is always a file on the current filesystem and there is no notion of
-login and password, so in this case only the name of the database is asked for.
-For sqlite, the database is stored in WORK_DIRECTORY/data.
+sort=aname

-* For single user settings with a limited number of news feeds, sqlite is
-probably the most convenient database system to use with Yioop. If you think you
-are going to make use of Yioop&#039;s social functionality and have many users,
-feeds, and crawl mixes, using a system like Mysql or Postgres might be more
+END_HEAD_VARSThe database is used to store information about what users are allowed to use the admin panel and what activities and roles these users have.
+* The Database Set-up field-set is used to specify what database management
+system should be used, how it should be connected to, and what user name and
+password should be used for the connection.
+
+* Supported Databases
+** PDO (PHP&#039;s generic DBMS interface).
+** Sqlite3 Database.
+** Mysql Database.
+
+* Unlike many database systems, if an sqlite3 database is being used then the
+connection is always a file on the current filesystem and there is no notion of
+login and password, so in this case only the name of the database is asked for.
+For sqlite, the database is stored in WORK_DIRECTORY/data.
+
+* For single user settings with a limited number of news feeds, sqlite is
+probably the most convenient database system to use with Yioop. If you think you
+are going to make use of Yioop&#039;s social functionality and have many users,
+feeds, and crawl mixes, using a system like Mysql or Postgres might be more
 appropriate.
 EOD;
 $help_pages["en-US"]["Debug_Display"] = <<< EOD
@@ -1560,7 +1615,7 @@ sort=aname

 END_HEAD_VARSThe &#039;&#039;&#039;Debug Display&#039;&#039;&#039; fieldset consists of checkboxes which control the debugging features of Yioop that are enabled.

-*The &#039;&#039;&#039;Error Info&#039;&#039;&#039; checkbox controls whether or not PHP errors, warnings and notices are output from Yioop. Whether the output is then to the browser or to a log file is controlled by the php.ini of your php install.
+*The &#039;&#039;&#039;Error Info&#039;&#039;&#039; checkbox controls whether or not PHP errors, warnings, and notices are output from Yioop. Whether the output is then displayed in the browser or written to a log file is controlled by the php.ini of your php install.
 *The &#039;&#039;&#039;Query Info&#039;&#039;&#039; checkbox controls whether or not Yioop appends to each page information about how long each database and search query took.
 *The &#039;&#039;&#039;Test Info&#039;&#039;&#039; checkbox controls whether or not Yioop unit tests are visible from the Yioop site. If checked, the &#039;&#039;&#039;Test Info&#039;&#039;&#039; link takes one to the unit tests.

@@ -1726,19 +1781,25 @@ robots=

 description=

+alternative_path=
+
 page_header=

 page_footer=

-END_HEAD_VARSThe &#039;&#039;&#039;Edit Locale&#039;&#039;&#039; form can be used to specify how various message strings in Yioop are translated in different languages.
-
-The table below has two columns: a column of string identifiers and a column of translations. A string identifier refers to a location in the code marked as needing to be translated, the corresponding translation in that row is how it should be translated for the current locale. Identifiers typically specify the code file in which the identifier occurs. For example, the identifier
- serversettings_element_name_server
-would appear in the file views/elements/server_settings.php . To see where this identifier occurs one could open that file and search for this string.
-
-If no translation exists yet for an identifier the translation value for that row will appear in red. Hovering the mouse over this red field will show the translation of this field in the default locale (usually English).
+sort=aname

-The &#039;&#039;&#039;Show dropdown&#039;&#039;&#039; allows one to show either all identifiers or just those missing translations. The filter field let&#039;s one to see only identifiers that contain the filter as a substring.
+END_HEAD_VARSThe &#039;&#039;&#039;Edit Locale&#039;&#039;&#039; form can be used to specify how various message strings in Yioop are translated in different languages.&lt;br&gt;&lt;br&gt;
+
+The edit locale table has two columns: a column of string identifiers and a column of translations. A string identifier refers to a location in the code marked as needing to be translated, the corresponding translation in that row is how it should be translated for the current locale. Identifiers typically specify the code file in which the identifier occurs. For example, the identifier&lt;br&gt;
+&lt;code&gt;
+ serversettings_element_name_server
+&lt;/code&gt;&lt;br&gt;
+would appear in the file views/elements/server_settings.php . To see where this identifier occurs one could open that file and search for this string.&lt;br&gt;&lt;br&gt;
+
+If no translation exists yet for an identifier the translation value for that row will appear in red. Hovering the mouse over this red field will show the translation of this field in the default locale (usually English). &lt;br&gt;&lt;br&gt;
+
+The &#039;&#039;&#039;Show Dropdown&#039;&#039;&#039; allows one to show either all identifiers or just those missing translations. The filter field lets one see only identifiers that contain the filter as a substring.
 EOD;
 $help_pages["en-US"]["Editing_a_Crawl_Mix"] = <<< EOD
 page_type=standard
@@ -1953,22 +2014,23 @@ robots=

 description=

+alternative_path=
+
 page_header=

 page_footer=

-END_HEAD_VARS&#039;&#039;&#039;Machine Information&#039;&#039;&#039; shows the currently known about machines.
-
-&lt;br /&gt;
-
-This list always begins with the &#039;&#039;&#039;Name Server&#039;&#039;&#039; itself and a toggle to control whether or not the Media Updater process is running on the Name Server. This allows you to control whether or not Yioop attempts to update its RSS (or Atom) search sources on an hourly basis. Yioop also uses the Media updater to convert videos that have been uploaded into mp4 and webm if ffmpeg is installed.
-
-&lt;br /&gt;
-
-There is also a link to the log file of the Media Updater process. Under the Name Server information is a dropdown that can be used to control the number of current machine statuses that are displayed for all other machines that have been added. It also might have next and previous arrow links to go through the currently available machines.
-
-&lt;br /&gt;
+sort=aname

+END_HEAD_VARSThe &#039;&#039;&#039;Machines&#039;&#039;&#039; section of the Manage Machines activity allows one to control and see the status of queue server, fetcher, and mirror processes associated with the current instance of Yioop. If one is running the media updater in distributed mode one can also see the status of this process for each machine. The &#039;+&#039; button next to Machines can be used to add a new machine. Each machine corresponds to the url of one location where an instance of Yioop is running that one wants to associate with the current name server. By default, the name server itself is the only machine listed and it has one queue server and two fetcher statuses listed. Be aware that in addition to adding the machine through this form, to properly associate a Yioop instance with a name server, you must also log in to the instance&#039;s web interface and configure its server settings to match the current name server.&lt;br&gt;
+
+&lt;br /&gt;
+
+For each machine listed under Machines, there is a delete link to remove it from the list of managed machines, and then there are On/Off toggles for each process (such as queue server or fetcher) that it contains. These toggles are green if the process is currently running, yellow if the process was started but is not currently running correctly, and red if the process is stopped. For each process, there is also a link to a log file which can be used to see recent activity (from most recent at top to less recent as one scrolls down) of that process.
+
+&lt;br /&gt;
+
+
 {{right|[[https://www.seekquarry.com/?c=static&amp;p=Documentation#GUI%20for%20Managing%20Machines%20and%20Servers| Learn More.]]}}
 EOD;
 $help_pages["en-US"]["Manage_Advertisements"] = <<< EOD
@@ -2149,11 +2211,10 @@ page_footer=

 sort=aname

-END_HEAD_VARS&#039;&#039;&#039;Media Sources&#039;&#039;&#039; are used to specify how Yioop should handle news feeds and podcast sites.
+END_HEAD_VARS&#039;&#039;&#039;Media Sources&#039;&#039;&#039; are used to specify how Yioop should handle news feeds, podcasts, and trending value sites. The &#039;&#039;&#039;Add Media Source&#039;&#039;&#039; form lets you add new media sources. What this form looks like depends on the &#039;&#039;&#039;Type&#039;&#039;&#039; dropdown chosen. Below we describe the form for each of the possible choices of type:

 &lt;br /&gt;

-
 An &#039;&#039;&#039;RSS media source&#039;&#039;&#039; can be used to add an RSS or Atom feed (it auto-detects which kind) to the list of feeds which are downloaded hourly when Yioop&#039;s Media Updater is turned on. Besides the name you need to specify the URL of the feed in question. The Category field search usually be left at news. If you want to specify additional categories such as weather or sports, you typically want to create a mix that searches the default index with the keyword media:your_category injects, and then make a new subsearch with that mix.
 This will allow your new category to show up on the Tools/More/Other Searches page.

@@ -2208,7 +2269,7 @@ changing the 04 above to 03, 02, 01 varies the group of cities. Most of the data
  Language: English
  Category: weather
  Channel: /&lt;pre(?:.+?)&gt;([^&lt;]+)/m
- Item: /
+ Item: /
 /
  Title: /^(.+?)\s\s\s+/
  Description: /\s\s\s+(.+?)$/
@@ -2251,11 +2312,11 @@ Yioop supports the downloading of single video or audio file sources, as well as
 &lt;br /&gt;

 A &#039;&#039;&#039;Scrape podcast source&#039;&#039;&#039; is like a &#039;&#039;&#039;Feed Podcast source&#039;&#039;&#039;, but where one has a HTML or XML page which has a periodically updated link to a video or audio source. For example, it might be an evening news web site.
-The URL field should be the page with the periodically updated link. The &#039;&#039;&#039;Aux Url XPaths&#039;&#039;&#039; field, if not blank, should be a sequence of XPaths or Regexes one per line. The first line will be applied to the page to obtain a next url to download. The next line&#039;s XPath or Regex is applied to this file and so on. The final url generated should be to the HTML or XML page that contains the media source for that day. Finally, on the page for the given day, &#039;&#039;&#039;Download XPath&#039;&#039;&#039; should be the XPath of the url of the video or audio file to download.
+The &#039;&#039;&#039;URL&#039;&#039;&#039; field should be the page with the periodically updated link. The &#039;&#039;&#039;Aux Url XPaths&#039;&#039;&#039; field, if not blank, should be a sequence of XPaths or Regexes one per line. The first line will be applied to the page to obtain a next url to download. The next line&#039;s XPath or Regex is applied to this file and so on. The final url generated should be to the HTML or XML page that contains the media source for that day. Finally, on the page for the given day, &#039;&#039;&#039;Download XPath&#039;&#039;&#039; should be the XPath of the url of the video or audio file to download.
 If a regex is used rather than an XPath, then the first capture group of the regex should give the url. A regex can be followed by json| to indicate the first capture group should be converted to a json object. To reference a path of through sub-objects of this object to a url. As an example, consider the following, which at some point, could download the Daily News  Scrape Podcast to a wiki group:

  Type: Scrape Podcast
- Name: Dailly News Podcast
+ Name: Daily News Podcast
  URL: https://www.somenetwork.com/daily-news
  Language: English
  Aux Url XPaths:
@@ -2266,10 +2327,52 @@ If a regex is used rather than an XPath, then the first capture group of the reg

 The initial page to be download will be: https://www.somenetwork.com/daily-news. On this page, we will use the first Aux Path to find a string in the page that matches /(https\:\/\/www.somenetwork.com\/daily-news\/video\/daily-[^\&quot;]+)\&quot;/. The contents matching between the parentheses is the first capture group and will be the next url to download. SO for example, one might get a url:
  https://cdn.somenetwork.com/daily-news/video/daily-safghdsjfg
-This url is then downloaded and a string matching  the pattern /window\.\_\_data\s*\=\s*([^
-]+\}\;)/ is found. The capture group portion of this string consists of what matches ([^
+This url is then downloaded and a string matching  the pattern /window\.\_\_data\s*\=\s*([^
+]+\}\;)/ is found. The capture group portion of this string consists of what matches ([^
 ]+\}\;) is then converted to a JSON object, because of the json| in the Aux Url XPath. From this JSON object, we look at the video field, then the current subfields, its 0 subfield, and finally, the publicUrl field. This is the url we download next. Lastly, the download XPath is then used to actually get the final video link from this downloaded page.
-Once this video is downloaded, it is stored in the Podcasts page&#039;s resource folder of the the My Private Group wiki group in a file with a name in the format: %Y-%m-%d.mp4.
+Once this video is downloaded, it is stored in the Podcasts page&#039;s resource folder of the the My Private Group wiki group in a file with a name in the format: %Y-%m-%d.mp4.
+
+A &#039;&#039;&#039;Trending value source&#039;&#039;&#039; is a value on a web page that one would like to track using Yioop&#039;s trending search mechanism. The Name field is the name to use for the trending value. The URL field should be the page with the periodically updated value. &#039;&#039;&#039;Category&#039;&#039;&#039; should be the trends category (a collection of trending values) one would like to track this value with. &#039;&#039;&#039;Group Within Category&#039;&#039;&#039; is the default name of the key that will be associated with the value found on this page. &#039;&#039;&#039;Trend Value Regex&#039;&#039;&#039; is a regular expression to match against the downloaded URL. If it matches and the expression has one capture group, then that capture group will be used as the value for a particular download time. If it has two or more capture groups, the first two capture groups are used to give a key name, value pair for a particular download time. As an example,
+
+ Name: Yioop Ticker
+ URL: https://my-great-stock-quotes/yioop
+ Language: English
+ Category: stocks
+ Group Within Category: Yioop Price
+ Trend Value Regex: /Yioop\:\s+(\d+\.\d+)/
+
+Here there is only one capture group (\d+\.\d+), so searching on trending:stocks, one would see all the hour, weekly, etc values for the trending values with that category. One such row would be Yioop Price whose values would be computed based on the numbers extracted according to this regex&#039;s (\d+\.\d+) capture group.
+
+EOD;
+$help_pages["en-US"]["Media_Updater"] = <<< EOD
+page_type=standard
+
+page_alias=
+
+page_border=solid-border
+
+toc=true
+
+title=
+
+author=
+
+robots=
+
+description=
+
+alternative_path=
+
+page_header=
+
+page_footer=
+
+sort=aname
+
+END_HEAD_VARSThe &#039;&#039;&#039;Media Updater&#039;&#039;&#039; section of the Manage Machines activity lets one control the media updater and see, when the updater is not being run in distributed mode, the status of this updater on the name server. The Media Updater is responsible for handling periodic jobs carried out by Yioop. Examples of periodic jobs include updating its RSS (or Atom) search sources on an hourly basis, downloading podcasts to wiki pages, computing query and view analytics, converting videos that have been uploaded into mp4 and webm if ffmpeg is installed, and sending out notification emails.&lt;br&gt;&lt;br&gt;
+
+The &#039;&#039;&#039;Configure Media Jobs&#039;&#039;&#039; link takes one to a page where one can control which media jobs are currently running. Beneath, there is an On/Off toggle for the media updater together with a color indicator (green on/red off) for whether the Media Updater is currently running. Finally, there is a &#039;&#039;&#039;Log&#039;&#039;&#039; file link which can be used to see the log entries (most recent at top) of the Media Updater process.
+
 EOD;
 $help_pages["en-US"]["Monetization"] = <<< EOD
 page_type=standard
@@ -2507,7 +2610,7 @@ page_border=solid-border

 toc=true

-title=Proxy server
+title=Proxy+server

 author=

@@ -2515,12 +2618,70 @@ robots=

 description=

+alternative_path=
+
 page_header=

 page_footer=

-END_HEAD_VARS* Yioop can make use of a proxy server to do web
-crawling.
+sort=aname
+
+END_HEAD_VARSYioop can make use of a proxy server to do web crawling. This fieldset is used to configure the proxies to be used.&lt;br&gt;
+&lt;br&gt;
+
+The &#039;&#039;&#039;Tor Proxy&#039;&#039;&#039; field is used to specify the onion router proxy used to crawl TOR web pages (Dark Web). The default is &lt;code&gt;127.0.0.1:9150&lt;/code&gt;. This corresponds to the proxy on your machine which would be active by default if you have the Tor Browser running. If you instead install a tor relay service (on a MacOS you could do &lt;code&gt;brew install tor&lt;/code&gt;), then start this service in the default way, the proxy would be &lt;code&gt;127.0.0.1:9050&lt;/code&gt;. If you do not intend to crawl tor pages you can safely ignore this field.
+&lt;br&gt;&lt;br&gt;
+
+Except for onion urls, Yioop does not make use of the Tor Proxy for crawling. You can configure Yioop to make use of a proxy for crawling general web pages by checking the &#039;&#039;&#039;Crawl via Proxies&#039;&#039;&#039; checkbox. This reveals a textarea where you can enter one proxy per line. The format for a line is either:
+* &lt;code&gt;address:port&lt;/code&gt;
+* &lt;code&gt;address:port:type&lt;/code&gt;
+* or &lt;code&gt;address:port:type:username:password&lt;/code&gt;
+&lt;br&gt;&lt;br&gt;
+
+As an example, one might have a line like:&lt;br&gt;
+&lt;code&gt;
+ 45.192.173.164:1080:socks5_hostname
+&lt;/code&gt;
+&lt;br&gt;
+
+Other possibilities for the proxy type are: &lt;code&gt;http&lt;/code&gt; (default), socks4, socks4a, socks5, or the cURL flag number for the desired protocol. For example, the number 5 corresponds to socks5, 7 to socks5_hostname.
+
+
+
+
+
+EOD;
+$help_pages["en-US"]["Query_Result_Mappings"] = <<< EOD
+page_type=standard
+
+page_alias=
+
+page_border=solid-border
+
+toc=true
+
+title=
+
+author=
+
+robots=
+
+description=
+
+alternative_path=
+
+page_header=
+
+page_footer=
+
+sort=aname
+
+END_HEAD_VARSA &#039;&#039;&#039;Query Map&#039;&#039;&#039; associates a particular query with a list of urls. These urls will be the
+first urls shown when a user searches on the given query.&lt;br&gt;&lt;br&gt;
+
+Entering a query and clicking &#039;&#039;&#039;Load&#039;&#039;&#039; will reveal a textarea containing the current list of
+urls associated with the query (if any). One can then edit/modify this area to list the desired
+urls, one url/line. Clicking &#039;&#039;&#039;Save&#039;&#039;&#039; will then record the changes.
 EOD;
 $help_pages["en-US"]["Recovery_Type"] = <<< EOD
 page_type=standard
@@ -2646,6 +2807,9 @@ END_HEAD_VARS&#039;&#039;&#039;Web Scrapers&#039;&#039;&#039; are used to help Y

 &#039;&#039;&#039;Signature&#039;&#039;&#039; is used to detect when a particular Web Scraper should be used. It should consist of an XPath query which would evaluate to a non-empty set of elements in the case of a page the scraper might work for.

+&#039;&#039;&#039;Priority&#039;&#039;&#039; is used to determine which scraper to apply to a web page when a page matches multiple scraper signatures. Yioop chooses the highest (largest) priority scraper that matches. If two scrapers have the same priority, it chooses the first one it finds matching. The priority dropdown allows one to set the priority of a scraper.
+
+
 &#039;&#039;&#039;Text XPath&#039;&#039;&#039; is used to specify an xpath to the most important content of a page for summarization.

 &#039;&#039;&#039;Delete XPaths&#039;&#039;&#039;is used to specify xpaths, one per line, of content under the Text Xpath portion of the web page, that should be non considered for summarizations.
@@ -2682,7 +2846,19 @@ sort=aname
 END_HEAD_VARSThe &#039;&#039;&#039;Search Access&#039;&#039;&#039; fieldset has checkboxes that control which interfaces can be used to get search query results from Yioop.

 * The &#039;&#039;&#039;Web&#039;&#039;&#039; checkbox controls whether or not a traditional web search through the Yioop instance&#039;s landing page can be done.
-* The &#039;&#039;&#039;RSS&#039;&#039;&#039; checkbox controls whether or not search queries in RSS format are available. If so, there a query string of the form ?q=some_search_query&amp;f=rss will output search results in rss format, a query string in RSS format will be output. This checkbox needs to be checked if you are using Yioop in a situation with multiple queue servers. This switch also enables queries of the form ?q=some_search_query&amp;f=json, ?q=some_search_query&amp;f=json&amp;callback=some_function, and ?q=some_search_query&amp;f=serial. These are respectively JSON format output, JSONP format output, and serialized PHP object format output.
+* The &#039;&#039;&#039;RSS&#039;&#039;&#039; checkbox controls whether or not search queries in RSS format are available. If so, then a query string of the form&lt;br&gt;
+&lt;code&gt;
+?q=some_search_query&amp;f=rss
+&lt;/code&gt;&lt;br&gt;
+will output search results in RSS format. This checkbox needs to be checked if you are using Yioop in a situation with multiple queue servers. This switch also enables queries of the form&lt;br&gt;
+&lt;code&gt;
+?q=some_search_query&amp;f=json, ?q=some_search_query&amp;f=json&amp;callback=some_function,
+&lt;/code&gt;&lt;br&gt;
+and&lt;br&gt;
+&lt;code&gt;
+?q=some_search_query&amp;f=serial.
+&lt;/code&gt;&lt;br&gt;
+The above queries demonstrate respectively JSON format output, JSONP format output, and serialized PHP object format output.
 * The &#039;&#039;&#039;API&#039;&#039;&#039; checkbox controls whether or not Yioop can be used as PHP library using the  Yioop Search Function API to return search results. This is described in the [[https://www.seekquarry.com/p/Documentation#Embedding%20Yioop%20in%20an%20Existing%20Site|Embedding Yioop]] section of the Yioop Documentation.

 EOD;
@@ -2816,6 +2992,39 @@ sort=aname

 END_HEAD_VARSEach machine in a cluster of Yioop instances with the same Name Server has a channel, defaulting to 0. The &#039;&#039;&#039;Server Channel&#039;&#039;&#039; drop down is populated with a list of channels of currently configured machines in the cluster. If there are no configured machines and empty message is displayed. The Server Channel of a crawl is used to specify which machines in the cluster will participate in the crawl -- only machine with the same channel as that of the crawl will participate. Using this mechanism it is possible to set up several ongoing simultaneous crawls provided they are on different channels.
 EOD;
+$help_pages["en-US"]["Session_Parameters"] = <<< EOD
+page_type=standard
+
+page_alias=
+
+page_border=solid-border
+
+toc=true
+
+title=
+
+author=
+
+robots=
+
+description=
+
+alternative_path=
+
+page_header=
+
+page_footer=
+
+sort=aname
+
+END_HEAD_VARSThese parameters control properties of the sessions of authenticated (logged-in) users.
+* &#039;&#039;&#039;Time Zone&#039;&#039;&#039; controls the time zone with respect to which times for this instance of Yioop are calculated. This will affect the display of dates and times throughout the Yioop instance.
+* &#039;&#039;&#039;Token Name&#039;&#039;&#039; controls the name of the cross-site request forgery prevention token that appears in urls on this site when a user is logged-in.
+* &#039;&#039;&#039;Session Name&#039;&#039;&#039; is the name of the HTTP cookie used to identify the current session.
+* &#039;&#039;&#039;Autologout&#039;&#039;&#039; is how long an inactive session is maintained before a user is automatically logged out.
+It does not remove the user&#039;s cookie from their browser, it just logs them out of the current session.
+* &#039;&#039;&#039;Cookie Consent Expires&#039;&#039;&#039; controls how long from when a user clicks their consent to allow cookies, until the next time a request for consent is required. Cookie consent is often one piece of complying with privacy legislation such as the California Consumer Privacy Act (CCPA), which requires allowing users to opt out of the selling of their information, and the European Union&#039;s General Data Protection Regulation (GDPR), which requires users to give their consent to the use of cookies. The Yioop system does not set cookies unless consent has been granted, but if you are using Yioop together with an external ad server, it might be possible for the latter to set cookies. Also, Yioop supports MathJax (for math formatting on Wiki pages) via a CDN by default which might set a cookie. You can always install MathJax on your site and use this directly to avoid this potentiality.
+EOD;
 $help_pages["en-US"]["Start_Crawl"] = <<< EOD
 page_type=standard

@@ -2864,22 +3073,35 @@ robots=

 description=

+alternative_path=
+
 page_header=

 page_footer=

-END_HEAD_VARS&#039;&#039;&#039;Subsearches&#039;&#039;&#039; are specialized search hosted on a Yioop site other than the default index. For example, a site might have a usual web search and also offer News and Images subsearches. This form let&#039;s you set up such a subsearch.
-
-&lt;br /&gt;
-
-A list of links to all the current subsearches on a Yioop site appears at the
- site_url?a=more
-page. Links to some of the subsearches may appear at the top left hand side of of the default landing page provided the Pages Options : Search Time : Subsearch checkbox is checked.
-
-&lt;br /&gt;
-
-The &#039;&#039;&#039;Folder Name&#039;&#039;&#039; of a subsearch is the name that appears as part of the query string when doing a search restricted to that subsearch. After creating a subsearch, the table below will have a &#039;&#039;&#039;Localize&#039;&#039;&#039; link next to its name. This lets you give names for your subsearch on the More page mentioned above with respect to different languages.
+sort=aname

+END_HEAD_VARS&#039;&#039;&#039;Subsearches&#039;&#039;&#039; are specialized searches hosted on a Yioop site other than the default index search. For example, a site might have a usual web search and also offer News and Images subsearches. A list of links to all the current subsearches on a Yioop site appears under the hamburger menu on the search landing page provided the Pages Options : Search Time : Subsearch checkbox is checked. The &#039;&#039;&#039;Add a Subsearch&#039;&#039;&#039; form lets you set up such a subsearch. The components of this form are as follows:
+
+&lt;br&gt;
+
+The &#039;&#039;&#039;Folder Name&#039;&#039;&#039; of a subsearch is the name that appears as part of the query string when doing a search restricted to that subsearch. After creating a subsearch, the table below the form will have a &#039;&#039;&#039;Localize&#039;&#039;&#039; link next to its name. This lets you give names for your subsearch with respect to different languages.
+
+&lt;br&gt;
+
+The &#039;&#039;&#039;Source&#039;&#039;&#039; dropdown on the form lets you choose one of the previously crawled indexes, defined crawl mixes, or defined trend categories to use for the subsearch. If &#039;&#039;&#039;Trend Category&#039;&#039;&#039; is chosen, the appearance of the form is slightly different. We first cover the remainder of the form for the selection of a crawl index or a mix as a type.
+
+&lt;br&gt;
+
+The &#039;&#039;&#039;Results Per Page&#039;&#039;&#039; dropdown on the form controls the number of search results that are displayed per page for the subsearch. For standard search this is usually 10, but for other kinds of searches such as images or news it is often convenient to have a larger number.
+
+&lt;br&gt;
+
+The &#039;&#039;&#039;Landing Priority&#039;&#039;&#039; dropdown controls whether the top five link highlight of the default subsearch is displayed on the landing page under the search bar, and if so, its placement. If &#039;&#039;No Highlight&#039;&#039; is selected then no highlight is shown; otherwise, subsearches with highlights are presented in descending order of their priority. In the case the subsearch is a Trending Category subsearch, then rather than selecting the top five links as the highlight, a selection of nine or so randomly chosen trending terms for that category is used.
+
+The &#039;&#039;&#039;Default Query&#039;&#039;&#039; field controls the query that should be run by default if the user hasn&#039;t entered a query. For example, for a news feed one might have &lt;code&gt;lang:default-major&lt;/code&gt;. This says if the user doesn&#039;t enter a query search &lt;code&gt;lang:default-major&lt;/code&gt; to get the most recent news in the default language.
+
+When &#039;&#039;&#039;Trend Category&#039;&#039;&#039; is selected as the search source, the Add Subsearch form has two additional dropdowns: &#039;&#039;&#039;Category&#039;&#039;&#039; and &#039;&#039;&#039;Sort&#039;&#039;&#039;. The former allows the user to select from the available trend categories a trend category for this subsearch; Sort lets the user specify whether the trends should be sorted according to the names of the terms for that trend or the scores for terms, and whether the sort should be ascending or descending.
 EOD;
 $help_pages["en-US"]["Suffix_Phrases"] = <<< EOD
 page_type=standard
@@ -3020,22 +3242,25 @@ robots=

 description=

+alternative_path=
+
 page_header=

 page_footer=

-END_HEAD_VARSThe &#039;&#039;&#039;Work Directory&#039;&#039;&#039; is a folder used to store all the customizations of this instance of Yioop.
-This field should be a complete file system path to a folder that exists.
-It should use forward slashes. For example:
-
- /some_folder/some_subfolder/yioop_data
-(more appropriate for Mac or Linux) or
- c:/some_folder/some_subfolder/yioop_data
-(more appropriate on a Windows system).
+sort=aname

-If you decide to upgrade Yioop at some later date you only have to replace the code folder
-of Yioop and set the Work Directory path to the value of your pre-upgrade version. For this
-reason the Work Directory should not be a subfolder of the Yioop code folder.
+END_HEAD_VARSThe &#039;&#039;&#039;Work Directory&#039;&#039;&#039; is a folder used to store all the customizations of this instance of Yioop. This field should be a complete file system path to a folder that exists. It should use forward slashes. For example:
+&lt;br&gt;
+
+ /some_folder/some_subfolder/yioop_data
+(more appropriate for Mac or Linux) or
+ c:/some_folder/some_subfolder/yioop_data
+(more appropriate on a Windows system).
+&lt;br&gt;
+
+If you decide to upgrade Yioop at some later date you only have to replace the code folder
+of Yioop and set the Work Directory path to the value of your pre-upgrade version.
 EOD;
 $help_pages["fr-FR"]["Account_Registration"] = <<< EOD
 page_type=page_alias
@@ -3109,3 +3334,4 @@ configurer un serveur SMTP sortant pour envoyer l&#039;e-mail.
 l&#039;utilisateur avant que l&#039;utilisateur ne soit autoris&eacute; &agrave; utiliser son compte.

 EOD;
+
diff --git a/src/css/search.css b/src/css/search.css
index 43be08545..9b936b9b7 100755
--- a/src/css/search.css
+++ b/src/css/search.css
@@ -2621,7 +2621,7 @@ td.no-border
 }
 .mobile .search-sources-table pre
 {
-    max-width: 300px;
+    max-width: 230px;
 }
 td.instruct
 {
@@ -3089,16 +3089,18 @@ table.wikitable > caption
 }
 #mobile-help
 {
-    margin: 2.5%;
+    clear:both;
+    float:none;
+    margin: 1.5%;
     position: absolute;
-    top: 142px;
-    width: 95%;
+    top: 58px;
+    width: 360px;
 }
 .help-pane
 {
     display : none;
     width: 100%;
-    border:2px solid #0094ff;
+    border: 2px solid #0094ff;
 }
 .small-margin-help-pane
 {
diff --git a/src/data/public_default.db b/src/data/public_default.db
index bdb8cd7c7..c2be9f050 100644
Binary files a/src/data/public_default.db and b/src/data/public_default.db differ
diff --git a/src/executables/MediaUpdater.php b/src/executables/MediaUpdater.php
index d09cd8990..2172a7b98 100644
--- a/src/executables/MediaUpdater.php
+++ b/src/executables/MediaUpdater.php
@@ -178,7 +178,7 @@ class MediaUpdater implements CrawlConstants
         L\crawlLog("Done checking Name Server for Media Updater properties");
     }
     /**
-     *
+     * @param array $jobs_list
      */
     public function loadJobs($jobs_list)
     {
diff --git a/src/executables/QueueServer.php b/src/executables/QueueServer.php
index c09bf406e..910bbea39 100755
--- a/src/executables/QueueServer.php
+++ b/src/executables/QueueServer.php
@@ -2995,7 +2995,13 @@ class QueueServer implements CrawlConstants, Join
         if (!in_array($doc_type, $this->indexed_file_types)) {
             return false;
         }
-        if (!C\nsdefined("KEEP_SPAM_DOMAINS")) {
+        // try to eliminate spam sites, but still crawl onion urls
+        $tld = "";
+        if ($host = parse_url($url, \PHP_URL_HOST)) {
+            $host_parts = explode(".", $host);
+            $tld = $host_parts[count($host_parts) - 1];
+        }
+        if (!C\nsdefined("KEEP_SPAM_DOMAINS") && $tld != 'onion') {
             $consonants = "bcdfghjklmnpqrstvwxyz";
             $vowels = "aeiouy";
             $host = @parse_url($url, PHP_URL_HOST);
diff --git a/src/library/FetchUrl.php b/src/library/FetchUrl.php
index e7ef26937..449eebc5a 100755
--- a/src/library/FetchUrl.php
+++ b/src/library/FetchUrl.php
@@ -182,8 +182,8 @@ class FetchUrl implements CrawlConstants
             curl_setopt($sites[$i][0], CURLOPT_TIMEOUT, C\PAGE_TIMEOUT);
             if (stripos($url,'.onion') !== false && $tor_proxy != "") {
                 curl_setopt($sites[$i][0], CURLOPT_PROXY, $tor_proxy);
-                //CURLPROXY_SOCKS5_HOSTNAME = 7
-                curl_setopt($sites[$i][0], CURLOPT_PROXYTYPE, 7);
+                curl_setopt($sites[$i][0], CURLOPT_PROXYTYPE,
+                    CURLPROXY_SOCKS5_HOSTNAME);
                 if ($timer) {
                     crawlLog("Using Tor proxy for $url..");
                 }
@@ -192,23 +192,39 @@ class FetchUrl implements CrawlConstants
                 $proxy_server = $proxy_servers[$select_proxy];
                 $proxy_parts = explode(":", $proxy_server);
                 $proxy_ip = $proxy_parts[0];
-                if (!isset($proxy_parts[2]) ||
-                    strtolower($proxy_parts[2]) == 'http') {
-                    $proxy_type = CURLPROXY_HTTP;
-                } else if (strtolower($proxy_parts[2]) == 'socks5') {
-                    $proxy_type = CURLPROXY_SOCKS5;
-                } else {
-                    $proxy_type = $proxy_parts[2];
-                }
-                if (isset($proxy_parts[1])) {
+                if (!empty($proxy_parts[1])) {
                     $proxy_port = $proxy_parts[1];
                 } else {
                     $proxy_port = "80";
                 }
+                $proxy_type = CURLPROXY_HTTP;
+                if (!empty($proxy_parts[2])) {
+                    $pre_proxy_type = strtolower($proxy_parts[2]);
+                    if ($pre_proxy_type != 'http') {
+                        if ($pre_proxy_type == 'socks4') {
+                            $proxy_type = CURLPROXY_SOCKS4;
+                        } else if ($pre_proxy_type == 'socks4a') {
+                            $proxy_type = CURLPROXY_SOCKS4A;
+                        } else if ($pre_proxy_type == 'socks5') {
+                            $proxy_type = CURLPROXY_SOCKS5;
+                        } else if ($pre_proxy_type == 'socks5_hostname') {
+                            $proxy_type = CURLPROXY_SOCKS5_HOSTNAME;
+                        } else {
+                            $proxy_type = $proxy_parts[2];
+                        }
+                    }
+                }
                 curl_setopt($sites[$i][0], CURLOPT_PROXY,
                     "$proxy_ip:$proxy_port");
                 curl_setopt($sites[$i][0], CURLOPT_PROXYTYPE,
                     $proxy_type);
+                if (!empty($proxy_parts[4])) {
+                    // $proxy_parts[3] = username; $proxy_parts[4] = password
+                    curl_setopt($sites[$i][0], CURLOPT_PROXYUSERPWD,
+                        $proxy_parts[3] . ":" . $proxy_parts[4]);
+                    curl_setopt($sites[$i][0], CURLOPT_PROXYAUTH,
+                        CURLAUTH_BASIC);
+                }
                 if ($timer) {
                     crawlLog("Selecting proxy $select_proxy for $url");
                 }
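With the rework above, a crawl proxy is written as a colon-separated string whose first three fields (host, port, type) behave as before and whose optional fourth and fifth fields carry the username and password passed to CURLOPT_PROXYUSERPWD. A hedged sketch of parsing one such entry; the entry itself is a made-up example, and CURLPROXY_SOCKS4A / CURLPROXY_SOCKS5_HOSTNAME assume a sufficiently recent PHP/cURL:

    <?php
    // Hypothetical proxy entry: host:port:type:username:password
    $proxy_server = "192.0.2.10:1080:socks5_hostname:alice:secret";
    $proxy_parts = explode(":", $proxy_server);
    $proxy_ip = $proxy_parts[0];
    $proxy_port = empty($proxy_parts[1]) ? "80" : $proxy_parts[1];
    $proxy_type = CURLPROXY_HTTP;
    if (!empty($proxy_parts[2])) {
        $type_map = ['socks4' => CURLPROXY_SOCKS4,
            'socks4a' => CURLPROXY_SOCKS4A, 'socks5' => CURLPROXY_SOCKS5,
            'socks5_hostname' => CURLPROXY_SOCKS5_HOSTNAME];
        $proxy_type = $type_map[strtolower($proxy_parts[2])] ?? $proxy_parts[2];
    }
    $ch = curl_init("https://www.example.com/");
    curl_setopt($ch, CURLOPT_PROXY, "$proxy_ip:$proxy_port");
    curl_setopt($ch, CURLOPT_PROXYTYPE, $proxy_type);
    if (!empty($proxy_parts[4])) {
        // Credentials are only set when both username and password appear.
        curl_setopt($ch, CURLOPT_PROXYUSERPWD,
            $proxy_parts[3] . ":" . $proxy_parts[4]);
        curl_setopt($ch, CURLOPT_PROXYAUTH, CURLAUTH_BASIC);
    }
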
diff --git a/src/library/StochasticTermSegmenter.php b/src/library/StochasticTermSegmenter.php
index 089cfaa05..bab52ca31 100644
--- a/src/library/StochasticTermSegmenter.php
+++ b/src/library/StochasticTermSegmenter.php
@@ -537,7 +537,12 @@ class StochasticTermSegmenter
      */
     private function getScore($frequency)
     {
-      return -log($frequency / $this->dictionary_file["N"]);
+        if (!empty($this->dictionary_file["N"]) &&
+            is_numeric($this->dictionary_file["N"])) {
+            return -log($frequency / $this->dictionary_file["N"]);
+        } else {
+            return 0;
+        }
     }
     /**
      * Adds a term to the dictionary
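The check above keeps getScore() from dividing by a missing or non-numeric dictionary total N; otherwise the score remains the negative log of the term's relative frequency, so rarer terms cost more on a segmentation path. A worked example with hypothetical counts:

    <?php
    // Hypothetical counts: a term seen 5 times out of N = 1000 dictionary tokens.
    $frequency = 5;
    $n = 1000;
    $score = -log($frequency / $n);
    echo $score; // ~5.298; a term seen 100 times would score only ~2.303
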
diff --git a/src/library/processors/PdfProcessor.php b/src/library/processors/PdfProcessor.php
index 76b8348e3..6771c1cad 100755
--- a/src/library/processors/PdfProcessor.php
+++ b/src/library/processors/PdfProcessor.php
@@ -214,7 +214,7 @@ class PdfProcessor extends TextProcessor
                 }
                 $temp_file = $temp_dir . L\crawlHash($stream_data) . ".png";
                 if ($image) {
-                    imagepng($image, $temp_file);
+                    @imagepng($image, $temp_file);
                     $ocr_data = ComputerVision::recognizeText($temp_file,
                         [$lang]);
                     if (!empty($ocr_data)) {
diff --git a/src/locale/ar/configure.ini b/src/locale/ar/configure.ini
index f55a62a82..827fa57ba 100755
--- a/src/locale/ar/configure.ini
+++ b/src/locale/ar/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "ويكي الوجهة:"
 searchsources_element_aux_url_xpath = "Aux Url مسارات Xpath:"
 searchsources_element_link_xpath_text = "تحميل Xpath:"
 searchsources_element_trend_category_group = "المجموعة ضمن الفئة:"
-searchsources_element_trending_xpath = "الاتجاه قيمة XPath:"
+searchsources_element_trending_regex = "الاتجاه قيمة Regex:"
 searchsources_element_media_sources = "مصادر إعلامية"
 searchsources_element_subsearches = "سوبسيرتشيس الحالي"
 searchsources_element_confirm_delete = "هل تريد حقا واضحة مسبقا بتحميل الأخبار تغذية البيانات ؟ "
diff --git a/src/locale/bn/configure.ini b/src/locale/bn/configure.ini
index 6619d0e81..48f7cbeba 100755
--- a/src/locale/bn/configure.ini
+++ b/src/locale/bn/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "উইকি গন্তব্য:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "ডাউনলোড Xpath:"
 searchsources_element_trend_category_group = "গ্রুপ মধ্যে বিভাগ:"
-searchsources_element_trending_xpath = "প্রবণতা মান, XPath:"
+searchsources_element_trending_regex = "প্রবণতা মান Regex:"
 searchsources_element_media_sources = "মিডিয়া উত্স"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "আপনি কি সত্যিই পরিষ্কার করতে চান, পূর্বে ডাউনলোড খবর ফিড তথ্য?"
diff --git a/src/locale/de/configure.ini b/src/locale/de/configure.ini
index a2d385be0..23cdf4c0b 100755
--- a/src/locale/de/configure.ini
+++ b/src/locale/de/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki Ziel:"
 searchsources_element_aux_url_xpath = "Aux-Url XPaths:"
 searchsources_element_link_xpath_text = "Download Xpath:"
 searchsources_element_trend_category_group = "Gruppe Innerhalb Der Kategorie:"
-searchsources_element_trending_xpath = "Trend Wert XPath:"
+searchsources_element_trending_regex = "Trend Wert-Regex:"
 searchsources_element_media_sources = "Media-Quellen"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Tun Sie wirklich l&ouml;schen m&ouml;chten, die Sie bereits heruntergeladen-news-feed-Daten?"
diff --git a/src/locale/en_US/configure.ini b/src/locale/en_US/configure.ini
index 633b46636..55a51ceb1 100644
--- a/src/locale/en_US/configure.ini
+++ b/src/locale/en_US/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki Destination:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "Download Xpath:"
 searchsources_element_trend_category_group = "Group Within Category:"
-searchsources_element_trending_xpath = "Trend Value XPath:"
+searchsources_element_trending_regex = "Trend Value Regex:"
 searchsources_element_media_sources = "Media Sources"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Do you really want to clear previously downloaded news feed data?"
diff --git a/src/locale/es/configure.ini b/src/locale/es/configure.ini
index 9ba110b98..49a19c091 100755
--- a/src/locale/es/configure.ini
+++ b/src/locale/es/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki De Destino:"
 searchsources_element_aux_url_xpath = "Aux Url Xpath:"
 searchsources_element_link_xpath_text = "Descargar Xpath:"
 searchsources_element_trend_category_group = "Grupo Dentro De La Categor&iacute;a:"
-searchsources_element_trending_xpath = "El Valor De Tendencia De XPath:"
+searchsources_element_trending_regex = "El Valor De Tendencia Regex:"
 searchsources_element_media_sources = "Fuentes De Medios De Comunicaci&oacute;n"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "&iquest;Realmente desea borrar previamente descargado el feed de noticias de los datos?"
diff --git a/src/locale/fa/configure.ini b/src/locale/fa/configure.ini
index 6976f6916..61a82b9cf 100755
--- a/src/locale/fa/configure.ini
+++ b/src/locale/fa/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "ویکی مقصد:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "دانلود Xpath:"
 searchsources_element_trend_category_group = "گروه در دسته:"
-searchsources_element_trending_xpath = "روند ارزش XPath:"
+searchsources_element_trending_regex = "روند ارزش عبارت منظم:"
 searchsources_element_media_sources = "منابع رسانه"
 searchsources_element_subsearches = "زیرجستجوهای فعلی"
 searchsources_element_confirm_delete = "آیا شما واقعا می خواهید به پاک کردن قبلا دریافت خوراک خبری داده ؟ "
diff --git a/src/locale/fr_FR/configure.ini b/src/locale/fr_FR/configure.ini
index 6a71c1f65..0dbcb7038 100755
--- a/src/locale/fr_FR/configure.ini
+++ b/src/locale/fr_FR/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki de destination:"
 searchsources_element_aux_url_xpath = "Aux URL des requ&ecirc;tes Xpath:"
 searchsources_element_link_xpath_text = "T&eacute;l&eacute;charger Xpath:"
 searchsources_element_trend_category_group = "Groupe Au Sein De La Cat&eacute;gorie:"
-searchsources_element_trending_xpath = "La Tendance De La Valeur De XPath:"
+searchsources_element_trending_regex = "La Tendance De La Valeur De La Regex:"
 searchsources_element_media_sources = "Les sources des m&eacute;dias"
 searchsources_element_subsearches = "Sous-recherches"
 searchsources_element_confirm_delete = "Voulez-vous vraiment effacer pr&eacute;c&eacute;demment t&eacute;l&eacute;charg&eacute; flux de nouvelles donn&eacute;es?"
diff --git a/src/locale/he/configure.ini b/src/locale/he/configure.ini
index 8d7227b8d..41036cc28 100755
--- a/src/locale/he/configure.ini
+++ b/src/locale/he/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki היעד:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "הורד Xpath:"
 searchsources_element_trend_category_group = "קבוצה בתוך קטגוריה זו:"
-searchsources_element_trending_xpath = "מגמה ערך XPath:"
+searchsources_element_trending_regex = "מגמה ערך Regex:"
 searchsources_element_media_sources = "מקורות מדיה"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "אתה באמת רוצה לנקות שהורדו בעבר חדשות הזנת נתונים?"
diff --git a/src/locale/hi/configure.ini b/src/locale/hi/configure.ini
index 11b7cd801..54c8c646e 100755
--- a/src/locale/hi/configure.ini
+++ b/src/locale/hi/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "विकी गंतव्य:"
 searchsources_element_aux_url_xpath = "Aux यूआरएल XPaths:"
 searchsources_element_link_xpath_text = "डाउनलोड Xpath:"
 searchsources_element_trend_category_group = "समूह के भीतर श्रेणी:"
-searchsources_element_trending_xpath = "प्रवृत्ति मूल्य XPath:"
+searchsources_element_trending_regex = "प्रवृत्ति मूल्य Regex:"
 searchsources_element_media_sources = "मीडिया स्रोतों"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "क्या आप वास्तव में चाहते हैं स्पष्ट करने के लिए पहले से डाउनलोड समाचार फ़ीड डेटा?"
diff --git a/src/locale/id/configure.ini b/src/locale/id/configure.ini
index a8d946260..ab109d8f3 100755
--- a/src/locale/id/configure.ini
+++ b/src/locale/id/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki Tujuan:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "Download Xpath:"
 searchsources_element_trend_category_group = "Kelompok Dalam Kategori:"
-searchsources_element_trending_xpath = "Tren Nilai XPath:"
+searchsources_element_trending_regex = "Tren Nilai Regex:"
 searchsources_element_media_sources = "Sumber-Sumber Media"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Apakah anda benar-benar ingin menghapus sebelumnya download kabar berita data?"
diff --git a/src/locale/it/configure.ini b/src/locale/it/configure.ini
index cbe099f2e..76dd0fdb0 100755
--- a/src/locale/it/configure.ini
+++ b/src/locale/it/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki Destinazione:"
 searchsources_element_aux_url_xpath = "Aux Url Xpath:"
 searchsources_element_link_xpath_text = "Scarica Xpath:"
 searchsources_element_trend_category_group = "Gruppo All&#039;Interno Della Categoria:"
-searchsources_element_trending_xpath = "Valore Di Tendenza XPath:"
+searchsources_element_trending_regex = "Valore Di Tendenza Regex:"
 searchsources_element_media_sources = "Fonti Multimediali"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Vuoi davvero cancellare precedentemente scaricato news feed di dati?"
diff --git a/src/locale/ja/configure.ini b/src/locale/ja/configure.ini
index cdd9f90b7..4b492fe9b 100755
--- a/src/locale/ja/configure.ini
+++ b/src/locale/ja/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki先:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "ダウンロードXpath:"
 searchsources_element_trend_category_group = "グループカテゴリ:"
-searchsources_element_trending_xpath = "流値XPath:"
+searchsources_element_trending_regex = "流値Regex:"
 searchsources_element_media_sources = "メディア源"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "いったいク選択ダイアログボックスrssフィードにデータはもらえますか?"
diff --git a/src/locale/kn/configure.ini b/src/locale/kn/configure.ini
index bd7f62365..8f6d6d309 100755
--- a/src/locale/kn/configure.ini
+++ b/src/locale/kn/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "ವಿಕಿ ತಾಣ:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "ಡೌನ್ಲೋಡ್ Xpath:"
 searchsources_element_trend_category_group = "ಗುಂಪು ಒಳಗೆ ವರ್ಗ:"
-searchsources_element_trending_xpath = "ಪ್ರವೃತ್ತಿ ಮೌಲ್ಯ XPath:"
+searchsources_element_trending_regex = "ಪ್ರವೃತ್ತಿ ಮೌಲ್ಯ ರಿಜೆಕ್ಸ್:"
 searchsources_element_media_sources = "ಮಾಧ್ಯಮ ಮೂಲಗಳು"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Do you really want to clear ಹಿಂದೆ ಡೌನ್ಲೋಡ್ ಸುದ್ದಿ ಫೀಡ್ ಡೇಟಾ?"
diff --git a/src/locale/ko/configure.ini b/src/locale/ko/configure.ini
index e0e66df87..a0cc690b3 100755
--- a/src/locale/ko/configure.ini
+++ b/src/locale/ko/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki 대상:"
 searchsources_element_aux_url_xpath = "Aux Url Xpath:"
 searchsources_element_link_xpath_text = "다운로드 Xpath:"
 searchsources_element_trend_category_group = "그룹 내에서 카테고리:"
-searchsources_element_trending_xpath = "추 값 XPath:"
+searchsources_element_trending_regex = "추 값 Regex:"
 searchsources_element_media_sources = "미디어 소스"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "당신이 정말로 원하는 명확한 이전 다운로드 뉴스 피드 데이터가?"
diff --git a/src/locale/nl/configure.ini b/src/locale/nl/configure.ini
index 2eea6b004..7e45b9a10 100644
--- a/src/locale/nl/configure.ini
+++ b/src/locale/nl/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki Bestemming:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "Download Xpath:"
 searchsources_element_trend_category_group = "Groep Binnen Een Categorie:"
-searchsources_element_trending_xpath = "Trend Waarde XPath:"
+searchsources_element_trending_regex = "Trend Waarde Regex:"
 searchsources_element_media_sources = "media Bronnen"
 searchsources_element_subsearches = "huidige Subsearches"
 searchsources_element_confirm_delete = "Wil je echt een duidelijke eerder gedownloade nieuws feed data?"
diff --git a/src/locale/pl/configure.ini b/src/locale/pl/configure.ini
index 328bd225e..af53b6509 100755
--- a/src/locale/pl/configure.ini
+++ b/src/locale/pl/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Punktem Wiki:"
 searchsources_element_aux_url_xpath = "Pomocniczy Język XPath Adres URL:"
 searchsources_element_link_xpath_text = "Pobierz Na XPath:"
 searchsources_element_trend_category_group = "Grupy Kategorii:"
-searchsources_element_trending_xpath = "Wartość Trendu W XPath:"
+searchsources_element_trending_regex = "Tendencja Wartość Regex:"
 searchsources_element_media_sources = "Źr&oacute;dła MEDI&Oacute;W "
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Czy na pewno chcesz usunąć wcześniej pobrane Wiadomości nośnik danych?"
diff --git a/src/locale/pt/configure.ini b/src/locale/pt/configure.ini
index 991f05582..f7853ae13 100755
--- a/src/locale/pt/configure.ini
+++ b/src/locale/pt/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki Destino:"
 searchsources_element_aux_url_xpath = "Aux Url Xpath:"
 searchsources_element_link_xpath_text = "Download Xpath:"
 searchsources_element_trend_category_group = "Grupo Dentro Da Categoria:"
-searchsources_element_trending_xpath = "Valor De Tend&ecirc;ncia XPath:"
+searchsources_element_trending_regex = "Valor De Tend&ecirc;ncia Regex:"
 searchsources_element_media_sources = "Fontes De M&iacute;dia"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Voc&ecirc; realmente deseja limpar previamente baixados feed de not&iacute;cias de dados?"
diff --git a/src/locale/ru/configure.ini b/src/locale/ru/configure.ini
index d1154cd8e..41d532707 100755
--- a/src/locale/ru/configure.ini
+++ b/src/locale/ru/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Пунктом Вики:"
 searchsources_element_aux_url_xpath = "Вспомогательный Язык XPath URL-Адрес:"
 searchsources_element_link_xpath_text = "Скачать По XPath:"
 searchsources_element_trend_category_group = "Группы В Категории:"
-searchsources_element_trending_xpath = "Значение Тренда В XPath:"
+searchsources_element_trending_regex = "Тенденция Значение Regex:"
 searchsources_element_media_sources = "Источники СМИ "
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Вы действительно хотите удалить ранее загруженные Новости канал данных?"
diff --git a/src/locale/te/configure.ini b/src/locale/te/configure.ini
index 9503ec156..6ea29dd33 100644
--- a/src/locale/te/configure.ini
+++ b/src/locale/te/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "వికీ గమ్యం:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "డౌన్లోడ్ Xpath:"
 searchsources_element_trend_category_group = "గ్రూప్ లోపల వర్గం:"
-searchsources_element_trending_xpath = "ధోరణి విలువ XPath:"
+searchsources_element_trending_regex = "ధోరణి విలువ Regex:"
 searchsources_element_media_sources = "మీడియా వర్గాలు"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Do you really want to clear గతంలో డౌన్లోడ్ న్యూస్ ఫీడ్ డేటా?"
diff --git a/src/locale/th/configure.ini b/src/locale/th/configure.ini
index 9e6b9eab2..c172be812 100755
--- a/src/locale/th/configure.ini
+++ b/src/locale/th/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki ปลายทาง:"
 searchsources_element_aux_url_xpath = "A Button On A Remote Control อยู่ Url XPaths:"
 searchsources_element_link_xpath_text = "ดาวน์โหลด Xpath:"
 searchsources_element_trend_category_group = "กลุ่มภายในหมวดหมู่:"
-searchsources_element_trending_xpath = "นนี้กระแสความนิยมค่า XPath:"
+searchsources_element_trending_regex = "นนี้กระแสความนิยมค่า Regex:"
 searchsources_element_media_sources = "ส่วนขยายแฟ้มของแหล่ง"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "คุณต้องการจะชัดเจนความเดิมตอนที่แล้วดาวน์โหลดข้อมูลข่าวแหล่งป้อนข้อมูลนั้นเหรอ?"
diff --git a/src/locale/tl/configure.ini b/src/locale/tl/configure.ini
index d24153fa6..a172c32d9 100644
--- a/src/locale/tl/configure.ini
+++ b/src/locale/tl/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki Patutunguhan:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "I-Download Xpath:"
 searchsources_element_trend_category_group = "Grupo Sa Loob Ng Kategorya:"
-searchsources_element_trending_xpath = "Trend Na Halaga XPath:"
+searchsources_element_trending_regex = "Trend Na Halaga Regex:"
 searchsources_element_media_sources = "Pinagmumulan Ng Media"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Huwag mo ba talagang nais upang i-clear ang naunang na-download na mga balita feed ng data?"
diff --git a/src/locale/tr/configure.ini b/src/locale/tr/configure.ini
index 86030d598..b063f16bc 100755
--- a/src/locale/tr/configure.ini
+++ b/src/locale/tr/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki Hedef:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "İndir Xpath:"
 searchsources_element_trend_category_group = "Kategori İ&ccedil;inde Grup:"
-searchsources_element_trending_xpath = "Trend Değeri XPath:"
+searchsources_element_trending_regex = "Trend Değeri İfade:"
 searchsources_element_media_sources = "Medya Kaynakları"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Ger&ccedil;ekten daha &ouml;nce indirilen haber kaynağı verileri silmek istiyor musunuz?"
diff --git a/src/locale/vi_VN/configure.ini b/src/locale/vi_VN/configure.ini
index e088d3f6b..e4dd22063 100755
--- a/src/locale/vi_VN/configure.ini
+++ b/src/locale/vi_VN/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki Đích:"
 searchsources_element_aux_url_xpath = "Aux Url XPaths:"
 searchsources_element_link_xpath_text = "Tải Về Khởi Động:"
 searchsources_element_trend_category_group = "Nh&oacute;m Trong Mục:"
-searchsources_element_trending_xpath = "Xu Hướng Gi&aacute; Trị Khởi Động:"
+searchsources_element_trending_regex = "Xu Hướng Gi&aacute; Trị Dịch:"
 searchsources_element_media_sources = "Nguồn Phương Tiện Truyền Thông"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "Bạn có thực sự muốn rõ ràng đã tải trước đó, nguồn tin dữ liệu?"
diff --git a/src/locale/zh_CN/configure.ini b/src/locale/zh_CN/configure.ini
index 113eaa909..57845ebdb 100755
--- a/src/locale/zh_CN/configure.ini
+++ b/src/locale/zh_CN/configure.ini
@@ -1173,7 +1173,7 @@ searchsources_element_wiki_destination = "Wiki目的地:"
 searchsources_element_aux_url_xpath = "Aux Url Xpath:"
 searchsources_element_link_xpath_text = "下载Xpath:"
 searchsources_element_trend_category_group = "集团内的类别:"
-searchsources_element_trending_xpath = "一趋势值XPath:"
+searchsources_element_trending_regex = "一趋势值Regex:"
 searchsources_element_media_sources = "媒体来源"
 searchsources_element_subsearches = "Subsearches"
 searchsources_element_confirm_delete = "你真的想清楚之前下载的新闻饲料的数据?"
diff --git a/src/models/MachineModel.php b/src/models/MachineModel.php
index eb1f22051..442f20ecf 100644
--- a/src/models/MachineModel.php
+++ b/src/models/MachineModel.php
@@ -519,10 +519,10 @@ class MachineModel extends Model
         chmod($job_file, 0777);
     }
     /**
-     *  Returns the name of a job from its class' file path
+     *  Returns the name of a job from its class file path
      *
-     *  @param string $job_path class' file path of job
-     *  @return string name of a job
+     * @param string $job_path class file path of job
+     * @return string name of a job
      */
     private function getJobNameFromPath($job_path)
     {
diff --git a/src/models/PhraseModel.php b/src/models/PhraseModel.php
index cb61bb870..8f5afa79d 100755
--- a/src/models/PhraseModel.php
+++ b/src/models/PhraseModel.php
@@ -487,18 +487,23 @@ class PhraseModel extends ParallelModel
             $total_rows = 0;
             $results['TOTAL_ROWS'] = 0;
         }
+        $query_map_count = 0;
+        if (!empty($query_map_results)) {
+            $results['PAGES'] = $results['PAGES'] ?? [];
+            $original_num_pages = count($results['PAGES']);
+            $results['PAGES'] = array_merge(
+                $query_map_results, $results['PAGES']);
+            $query_map_count = count($results['PAGES']) - $original_num_pages;
+        }
         if (isset($total_rows)) {
-            $results['TOTAL_ROWS'] = $total_rows;
+            $results['TOTAL_ROWS'] = $total_rows + $query_map_count;
         } elseif (isset($results['PAGES'])) {
-            $results['TOTAL_ROWS'] = count($results['PAGES']);
+            $results['TOTAL_ROWS'] = count($results['PAGES']) +
+                $query_map_count;
         }
         if ($raw == 0 && isset($results['TOTAL_ROWS']) &&
             $results['TOTAL_ROWS'] > 0) {
             if (!empty($filter)) {
-                if (!empty($query_map_results) && !empty($results['PAGES'])) {
-                    $results['PAGES'] = array_merge(
-                        $query_map_results, $results['PAGES']);
-                }
                 $results = $filter->incorporateEditedPageResults($results,
                     $format_words);
             }
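The relocated merge above prepends the query-map results to the result pages before any filtering and bumps TOTAL_ROWS by the number of pages actually added. A schematic of that bookkeeping using hypothetical result arrays:

    <?php
    // Hypothetical stand-ins for $query_map_results and $results from a search.
    $query_map_results = [["URL" => "https://example.com/a"],
        ["URL" => "https://example.com/b"]];
    $results = ["PAGES" => [["URL" => "https://example.com/c"]],
        "TOTAL_ROWS" => 1];
    $original_num_pages = count($results["PAGES"]);
    $results["PAGES"] = array_merge($query_map_results, $results["PAGES"]);
    $query_map_count = count($results["PAGES"]) - $original_num_pages; // 2
    $results["TOTAL_ROWS"] += $query_map_count; // now 3
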
diff --git a/src/models/SourceModel.php b/src/models/SourceModel.php
index 8fc8c6a1d..2d8e38435 100644
--- a/src/models/SourceModel.php
+++ b/src/models/SourceModel.php
@@ -236,7 +236,13 @@ class SourceModel extends ParallelModel
         $sql = "SELECT LOCALE_ID FROM LOCALE ".
             "WHERE LOCALE_TAG = ? " . $db->limitOffset(1);
         $result = $db->execute($sql, [$locale_tag]);
+        if (!$result) {
+            return $subsearches;
+        }
         $row = $db->fetchArray($result);
+        if (!$row) {
+            return $subsearches;
+        }
         $locale_id = $row['LOCALE_ID'];
         $sql = "SELECT S.LOCALE_STRING AS LOCALE_STRING, ".
             "S.FOLDER_NAME AS FOLDER_NAME, ".
diff --git a/src/scripts/help.js b/src/scripts/help.js
index 4b25e721d..24c6b9cd9 100644
--- a/src/scripts/help.js
+++ b/src/scripts/help.js
@@ -345,12 +345,13 @@ function displayHelpForId(help_point, is_mobile, target_controller,
                     back_params) + '">' +
                 tl["helpbutton_helper_edit"] + '</a>]';
                 elt("help-frame-body").innerHTML =
-                    (tl["wiki_view_page_no_exist"]).replace("%s", "'" +
+                    (tl["helpbutton_helper_page_no_exist"]).replace("%s", "'" +
                     help_point.getAttribute("data-pagename") + "'") +
-                    tl["wiki_view_create_edit"];
+                    tl["helpbutton_helper_create_edit"];
                 toggleHelp('help-frame', is_mobile, target_controller);
             } else {
-                doMessage("<h2 class='red'>" + tl["wiki_view_not_available"] +
+                doMessage("<h2 class='red'>" +
+                tl["helpbutton_helper_not_available"] +
                 "</h2>");
             }
         });
diff --git a/src/views/AdminView.php b/src/views/AdminView.php
index 6b0a111fb..21500e37f 100755
--- a/src/views/AdminView.php
+++ b/src/views/AdminView.php
@@ -51,8 +51,13 @@ class AdminView extends ComponentView
         $this->addContainer("top", "adminbar");
         $this->addContainer("top", "adminmenu");
         $this->addContainer("sub-top", "header");
+        if ($_SERVER["MOBILE"]) {
+            $this->addContainer("center", "help");
+        }
         $this->addContainer("center", "admin");
         $this->addContainer("center", "footer");
-        $this->addContainer("opposite", "help");
+        if (!$_SERVER["MOBILE"]) {
+            $this->addContainer("opposite", "help");
+        }
     }
 }
diff --git a/src/views/MachinestatusView.php b/src/views/MachinestatusView.php
index f850b58e2..d32ef62b1 100644
--- a/src/views/MachinestatusView.php
+++ b/src/views/MachinestatusView.php
@@ -64,7 +64,9 @@ class MachinestatusView extends View
         $caution = !isset($data['MACHINES']['NAME_SERVER']["MediaUpdater"])
             || $data['MACHINES']['NAME_SERVER']["MediaUpdater"] == 0;
         ?>
-        <h2><?=tl('machinestatus_view_media_updater')?></h2>
+        <h2><?=tl('machinestatus_view_media_updater'). "&nbsp;" .
+            $this->helper("helpbutton")->render(
+            "Media Updater", $data[C\CSRF_TOKEN]) ?></h2>
         <div class="no-margin">[<a href="<?=$base_url . 'mediajobs'
             ?>"><?= tl('machinestatus_view_configure_media_jobs'); ?>]</a></div>
         <div class="box">
diff --git a/src/views/elements/LanguageElement.php b/src/views/elements/LanguageElement.php
index d719bad1f..3c8530005 100755
--- a/src/views/elements/LanguageElement.php
+++ b/src/views/elements/LanguageElement.php
@@ -46,6 +46,9 @@ class LanguageElement extends Element
      */
     public function render($data)
     {
+        if (empty($data['LANGUAGES'])) {
+            return;
+        }
         $num_languages = count($data['LANGUAGES']);
         $size = min(4, $num_languages);
         if (!empty($data['LANGUAGES_TO_SHOW'])) {
diff --git a/src/views/elements/MediajobsElement.php b/src/views/elements/MediajobsElement.php
index 099a20c98..4dba0780f 100644
--- a/src/views/elements/MediajobsElement.php
+++ b/src/views/elements/MediajobsElement.php
@@ -56,7 +56,9 @@ class MediajobsElement extends Element
         [<a href="<?=$base_url ?>"
         >X</a>]
         </div>
-        <h2><?=tl('mediajobs_element_configure_media_jobs') ?></h2>
+        <h2><?=tl('mediajobs_element_configure_media_jobs') . "&nbsp;" .
+            $this->view->helper("helpbutton")->render(
+            "Configure Media Jobs", $data[C\CSRF_TOKEN]) ?></h2>
         <div><b><?php
             e(tl('mediajobs_element_mode'));
         ?></b> [<?php
diff --git a/src/views/elements/ScrapersElement.php b/src/views/elements/ScrapersElement.php
index b4690b75c..d5a094311 100644
--- a/src/views/elements/ScrapersElement.php
+++ b/src/views/elements/ScrapersElement.php
@@ -137,7 +137,9 @@ class ScrapersElement extends Element
         </div><?php
     }
     /**
+     * Used to draw the form for adding a new scraper or editing an existing one
      *
+     * @param array $data potentially contains edit info for the current scraper
      */
     public function renderScraperForm($data)
     {
diff --git a/src/views/elements/SearchsourcesElement.php b/src/views/elements/SearchsourcesElement.php
index 74db4f3ad..9c2310bb4 100644
--- a/src/views/elements/SearchsourcesElement.php
+++ b/src/views/elements/SearchsourcesElement.php
@@ -113,7 +113,7 @@ class SearchsourcesElement extends Element
                 tl('searchsources_element_wiki_destination') => 5],
             "trending_value" => [
                 tl('searchsources_element_trend_category_group') => 5,
-                tl('searchsources_element_trending_xpath') => 6,
+                tl('searchsources_element_trending_regex') => 6,
             ],
         ];
         $num_sub_aux_fields = 6;
@@ -642,7 +642,7 @@ class SearchsourcesElement extends Element
         <tr><td><label for="trend-stop-string"><b><span id="trend-text"><?=
             tl('searchsources_element_trending_stop_regex')
             ?></span><span id="trending-xpath"><?=
-                tl('searchsources_element_trending_xpath')
+                tl('searchsources_element_trending_regex')
                 ?></span></b></label></td><td>
             <input type="text" id="trend-stop-string"
                 name="trending_stop_regex"
diff --git a/tests/UtilityTest.php b/tests/UtilityTest.php
index 10b7e546a..1d42223b4 100644
--- a/tests/UtilityTest.php
+++ b/tests/UtilityTest.php
@@ -53,6 +53,10 @@ class UtilityTest extends UnitTest
     public function tearDown()
     {
     }
+    /**
+     * Determines if the checkTimeInterval method can correctly determine
+     * if a time of day is between the times of day of two timestamps
+     */
     public function checkTimeIntervalTestCase()
     {
         $three_oh_five = 1592172350;