# Source page: [[User:PearBOT/Biography short descriptions/source]]
# This program is dual licensed under the MIT license (provided below) and CC-BY-SA 3.0.
# Copyright 2021 Wikipedia user Trialpears
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import pywikibot
import re
from pywikibot import pagegenerators
# Shared connection to English Wikipedia; every Category/Page object below uses it.
site = pywikibot.Site('en', 'wikipedia')
def extractfirst(text):
    """Reduce raw article wikitext to its first plain-text sentence.

    Strips categories, files/images, section headings, bold/italic markup,
    wikilinks (keeping the display text), templates, parenthesised asides,
    references and HTML comments, then cuts everything after the first
    sentence-ending punctuation.

    :param text: raw wikitext of a page
    :return: the first sentence as plain text (may be "" for empty input)
    """
    result = text
    # Drop category, file and image links entirely.
    result = re.sub(r"\[\[[Cc]ategory:[^\]]*]]", "", result)
    result = re.sub(r"\[\[[Ff]ile:[^\]]*]]", "", result)
    result = re.sub(r"\[\[[Ii]mage:[^\]]*]]", "", result)
    result = re.sub(r"\n", " ", result)
    # Remove everything from the first section heading onward.
    # BUG FIX: re.DOTALL was previously passed as re.sub's positional
    # `count` argument; pass it as flags= (the pattern itself no longer
    # needs the (a|[^a]) trick to cross newlines).
    result = re.sub(r"==.*", "", result, flags=re.DOTALL)
    # Bold/italic markup: remove the marked-up span (e.g. the bolded
    # article title), then any leftover quote runs.
    result = re.sub(r"'{3,5}[^']*'{3,5}", "", result)
    result = re.sub(r"''+", "", result)
    # Wikilinks: keep the display text (part after the pipe, or the target).
    result = re.sub(r"\[\[([^\|\]\[]*\|)?([^\|\]\[]*)]]", r"\2", result)
    # Five passes handle templates/parentheses/refs nested up to 5 deep.
    for _ in range(5):
        result = re.sub(r"{{[^{}]*}}", "", result)
        result = re.sub(r"\([^\(\)]*\)", "", result)
        result = re.sub(r"<ref[^<>]*>[^<>]*</ref>", "", result)
        result = re.sub(r"<ref[^<>]*/>", "", result)
        result = re.sub(r"<!--[^<>]*-->", "", result)
    result = re.sub(r"\n", "", result)
    # Collapse runs of spaces.
    # BUG FIX: the original pattern " *" also matches the empty string at
    # every position, which *inserts* a space between every character;
    # " +" only collapses existing runs.
    result = re.sub(r" +", " ", result)
    # Keep only the first sentence: up to . ! or ? followed by a new
    # capitalised sentence or end of text.
    result = re.sub(r"(^.*?[.!?](?=\s[A-Z]|$)).*", r"\1", result)
    result = re.sub(r"^\s*", "", result)
    result = re.sub(r"\s*(?=,|\.)", "", result)
    result = re.sub(r"\s*$", "", result)
    return result
def extractdescription(text):
    """Derive a short description from a first sentence produced by extractfirst().

    Accepts sentences shaped like "<Name> is a/an <description> ..." and
    returns the trimmed description, but only when it is at most 40
    characters and starts with a nationality adjective.

    :param text: first sentence of an article, plain text
    :return: the description string, or False when no safe description
        can be generated
    """
    result = text
    nationalityregex = (
        "(Afghan|Albanian|Algerian|Andorran|Angolan|Barbuda|Antiguan|Barbudan|Argentine|Armenian|Australian|Austrian|Azerbaijani|Azeri|Bahamas|Bahamian|Bahraini|Bengali|Barbadian|Belarusian|Belgian|Belizean|Beninese|Beninois|Bhutanese|Bolivian|Bosnian|Herzegovinian|Motswana|Botswanan|Brazilian|Bruneian|Bulgarian|Faso|Burkinabé|Burmese|Burundian|Verde|Cabo|Verdean|Cambodian|Cameroonian|Canadian|African|Chadian|Chilean|Chinese|Colombian|Comoran|Comorian|Congolese|Rican|Ivorian|Croatian|Cuban|Cypriot|Republic|Czech|Danish|Djiboutian|Dominican|Republic|Dominican|Timor|Timorese|Ecuadorian|Egyptian|Salvador|Salvadoran|Guinea|Equatorial|Guinean|Equatoguinean|Eritrean|Estonian|Ethiopian|Fijian|Finnish|French|Gabonese|Gambian|Georgian|German|Ghanaian|Gibraltar|Greek|Hellenic|Grenadian|Guatemalan|Guinean|Bissau|Guinean|Guyanese|Haitian|Honduran|Hungarian|Magyar|Icelandic|Indian|Indonesian|Iranian|Persian|Iraqi|Irish|Israeli|Italian|Coast|Ivorian|Jamaican|Japanese|Jordanian|Kazakhstani|Kazakh|Kenyan|Kiribati|Korea|North|Korean|Korea|South|Korean|Kuwaiti|Kyrgyzstani|Kyrgyz|Kirgiz|Kirghiz|Lao|Laotian|Latvian|Lettish|Lebanese|Basotho|Liberian|Libyan|Liechtensteiner|Lithuanian|Luxembourg|Luxembourgish|Macedonian|Malagasy|Malawian|Malaysian|Maldivian|Malian|Malinese|Maltese|Islands|Marshallese|Martiniquais|Martinican|Mauritanian|Mauritian|Mexican|Micronesian|Moldovan|Monégasque|Monacan|Mongolian|Montenegrin|Moroccan|Mozambican|Namibian|Nauruan|Nepali|Nepalese|Dutch|Netherlandic|Zealand|Zealand|Zelanian|Nicaraguan|Nigerien|Nigerian|Marianan|Norwegian|Omani|Pakistani|Palauan|Palestinian|Panamanian|Guinea|Papua|Guinean|Papuan|Paraguayan|Peruvian|Filipino|Philippine|Polish|Portuguese|Rico|Puerto|Rican|Qatari|Romanian|Russian|Rwandan|Kitts|and|Nevis|Kittitian|Nevisian|Saint|Lucian|Saint|Vincentian|Vincentian|Samoan|Marino|Sammarinese|Tomé|Príncipe|São|Toméan|Arabia|Saudi|Arabian|Senegalese|Serbian|Seychellois|Leone|Sierra|Leonean|Singapore|Singaporean|Slovak|Slovenian|Slovene"
        "|Islands|Solomon|Island|Somali|African?|South|African|Sudan|South|Sudanese|Spanish|Lanka|Sri|Lankan|Sudanese|Surinamese|Swazi|Swedish|Swiss|Syrian|Tajikistani|Tanzanian|Thai|Leste|Timorese|Togolese|Tokelauan|Tongan|Tobago|Trinidadian|Tobagonian|Tunisian|Turkish|Turkmen|Tuvaluan|Ugandan|Ukrainian|Arab|Emirates|Emirati|Emirian|Emiri|Kingdom|Great|Britain|Northern|Ireland|UK|British|America|United|States|U.S.|American|Uruguayan|Uzbekistani|Uzbek|Vanuatu|Vanuatuan|Vatican|Venezuelan|Vietnamese|Yemeni|Zambian|Zimbabwean)"
    )
    # Reject sentences containing initials ("J.R.") or honorific/rank
    # abbreviations ("Mr.", "Dr.", ...) which break sentence splitting.
    # BUG FIX: added \b so e.g. "chemist." no longer matches the "st."
    # alternative (the original matched these substrings mid-word).
    if re.search(r"\.[^\s]\.", result, re.IGNORECASE) or re.search(
            r"\b(br|chan|chapln|dr|fr|gov|miss|mr|mrs|ms|mme|m|msgr|pres|prof|rep|rev|revs|sen|sr|sra|srta|hon|esq|jr|ret|lt|col|sgt|gen|cpl|capt|bg|adm|cwo|ens|maj|msgt|st)\.",
            result, re.IGNORECASE):
        return False
    # A sentence ending in a conjunction was truncated mid-list.
    if re.search(r"(and|or)$", result):
        return False
    # NOTE(review): the original had this check duplicated plus a stray
    # unconditional `return False` that made everything below unreachable;
    # both artifacts are removed here.
    # Only handle sentences of the form "<Name parts> is/was a/an ...".
    if not re.match(r'^(("?[A-Z][a-z]*"?|(Jr\.)),? )*?(is|was) (a|an) ', result):
        return False
    # Keep only the noun phrase after "is a/an".
    result = re.sub(r'^(("?[A-Z][a-z]*"?|(Jr\.)),? )*?(is|was) (a|an) (.*)', r"\6", result)
    # Trim trailing qualifying clauses ("who ...", "best known ...", etc.).
    for tail in (
        r',? who.*',
        r',? and currently.*',
        r',? currently.*',
        r',? as well.*',
        r',? better known.*',
        r',? best known.*',
        r',? also known.*',
        r',? most known.*',
        r',? mostly known.*',
        r',? generally known.*',
        r',? especially known.*',
        r',? internationally known.*',
        r',? well known.*',
        r',? particularly known.*',
        r',? primarily known.*',
        r',? known for.*',
        r',? riding for( the)?.*',
        r",? active in.*",
        r",? born in.*",
        r",? perhaps.*",
        r",? mainly.*",
        # A new capitalised clause ("... X is ...") starts a second statement.
        r",? [A-Z][a-z]* (is|are|were|was)(a|[^a])*",
    ):
        result = re.sub(tail, "", result)
    result = re.sub(r"\.$", "", result)
    # Drop "former": descriptions should describe the notable occupation.
    # BUG FIX: the original non-raw "\bformer\b" contained literal
    # backspace characters and never matched; also consume the following
    # space so no double space is left behind.
    result = re.sub(r"\bformer\b *", "", result)
    result = re.sub(r'[,;]? (he|she|they) (is|are|were|was).*', "", result)
    # Capitalise the first letter only.
    result = re.sub(r'([a-zA-Z])', lambda x: x.groups()[0].upper(), result, count=1)
    result = re.sub(r"[\.\,\;]$", "", result)
    result = re.sub(r",? (and|is|that|was|were|are|for) ?(and|is|that|was|were|are|for)?$", "", result)
    # A trailing ", Word" is usually a leftover place or clause fragment.
    if re.search(r", [A-Za-z]+$", result):
        return False
    # Require at least two words and no leftover link underscores.
    if not re.search(" ", result) or re.search("_", result):
        return False
    # Only publish short, nationality-led descriptions.
    if len(result) <= 40 and re.match(nationalityregex, result):
        return result
    return False
# NOTE(review): this counter is re-initialised again just before the main
# loop below, so this assignment is effectively unused.
savecounter = 0
def category_filter(generator, category):
    """Yield only the pages that are not members of the given category.

    @param generator: Pages to filter
    @type generator: iterator
    @param category: Category whose members are dropped
    @type category: L{pywikibot.page.Category}
    """
    for candidate in generator:
        if category in candidate.categories():
            continue
        yield candidate
def dates(text, se):
    """Find a "(born YYYY)" suffix for a short description.

    :param text: raw wikitext of the page
    :param se: the generated short description for the page
    :return: "(born YYYY)" when a birth year is found; the sentinel string
        "False" when the page should be skipped entirely (politicians, or
        estimated birth years); None when no year is found (caller adds no
        suffix but still saves).
    """
    # Skip politicians by infobox: their descriptions need office dates.
    if re.search(r"infobox (office ?holder|governor|senator|mayor|politician|chancellor|(vice)? president|congressman|prime minister|mp|member of parlament)", text, re.IGNORECASE):
        return "False"
    # Skip politicians by description wording.
    # BUG FIX: this test previously read the *global* `sd` instead of the
    # `se` parameter it was given.
    if re.search(r"(diplomat|republican|democrat|representative|office ?holder|governor|senator|mayor|politician|chancellor|(vice)? president|congressman|prime minister|member of parlament)", se, re.IGNORECASE):
        return "False"
    # Birth year is only an estimate -> do not state it.
    if re.search(r"(birth based on age as of date|bbad|birth year from age at date)", text, re.IGNORECASE):
        return "False"
    # Prefer the infobox |birth_date= parameter.
    # BUG FIX: the original searched and then re-extracted with a slightly
    # different pattern; use one match object for both.
    match = re.search(r"birth_date\s*=\s*[^<}\n]*?(\d\d\d\d)", text, re.IGNORECASE)
    if match:
        return "(born " + match.group(1) + ")"
    # Fall back to "born ... YYYY" in the lead section only.
    # BUG FIX: re.DOTALL was passed as re.sub's `count` argument, and the
    # year was then extracted from the *full* text rather than the
    # heading-stripped text that was searched.
    lead = re.sub(r"==.*", "", text, flags=re.DOTALL)
    match = re.search(r"born[^\)\.]{1,30}?(\d\d\d\d)", lead, re.IGNORECASE)
    if match:
        return "(born " + match.group(1) + ")"
    # Implicit None: no usable year found.
# Main loop: walk living people without a short description, generate one
# from the lead sentence, and prepend {{short description}} to the page.
living_people_cat = pywikibot.Category(site, 'Living people')
sd_article_cat = pywikibot.Category(site, 'Articles with short description')
gen = pagegenerators.CategorizedPageGenerator(living_people_cat, start='Luca')
gen = category_filter(gen, sd_article_cat)
gen = pagegenerators.PreloadingGenerator(gen)
savecounter = 0
for page in gen:
    sd = extractdescription(extractfirst(page.text))
    # Skip pages with no generatable description, or that already mention a
    # short description. (Is the second condition necessary?)
    if not sd or 'short description' in page.text:
        continue
    datetxt = dates(page.text, sd)
    # BUG FIX: the sentinel was compared with `is`, which only worked by
    # accident of CPython string interning (and is a SyntaxWarning on
    # modern Python); compare by value instead.
    if datetxt == "False":
        continue
    if datetxt:
        sd = str(sd) + " " + str(datetxt)
    print(page.title())
    description = "{{short description|" + sd + "}}\n"
    page.text = description + page.text
    savecounter += 1
    print(description)
    # Emergency stop: only save while the on-wiki stop page is empty.
    if pywikibot.Page(site, u"User:PearBOT/Biography short descriptions/stop page").text == "":
        page.save('Adding automatically generated short description. For more information see [[Wikipedia:Bots/Requests for approval/PearBOT 5]] Feedback appreciated at [[User talk:Trialpears]]')
    else:
        break