cleaning some code (from MR #9)

This commit is contained in:
Carl Chenet 2017-07-31 15:58:30 +02:00
parent 67b7e72507
commit 37ee1e5a67
6 changed files with 27 additions and 29 deletions

View File

@@ -20,7 +20,7 @@
# standard library imports # standard library imports
from operator import itemgetter from operator import itemgetter
class AddTags(object): class AddTags:
'''Add as many tags as possible depending on the tweet length''' '''Add as many tags as possible depending on the tweet length'''
def __init__(self, tweet, tags): def __init__(self, tweet, tags):
'''Constructor of AddTags class''' '''Constructor of AddTags class'''
@@ -34,14 +34,14 @@ class AddTags(object):
tweetlength = len(self.tweet) tweetlength = len(self.tweet)
# sort list of tags, the ones with the greater length first # sort list of tags, the ones with the greater length first
tagswithindices = ({'text':i, 'length':len(i)} for i in self.tags) tagswithindices = ({'text':i, 'length': len(i)} for i in self.tags)
sortedtagswithindices = sorted(tagswithindices, key=itemgetter('length'), reverse=True) sortedtagswithindices = sorted(tagswithindices, key=itemgetter('length'), reverse=True)
self.tags = (i['text'] for i in sortedtagswithindices) self.tags = (i['text'] for i in sortedtagswithindices)
# add tags is space is available # add tags is space is available
for tag in self.tags: for tag in self.tags:
taglength = len(tag) taglength = len(tag)
if (tweetlength + (taglength +1)) <= maxlength: if (tweetlength + (taglength + 1)) <= maxlength:
self.tweet = ' '.join([self.tweet, tag]) self.tweet = ' '.join([self.tweet, tag])
tweetlength += (taglength + 1) tweetlength += (taglength + 1)

View File

@@ -25,7 +25,7 @@ import sys
__version__ = '0.5' __version__ = '0.5'
class CliParse(object): class CliParse:
'''CliParse class''' '''CliParse class'''
def __init__(self): def __init__(self):
'''Constructor for the CliParse class''' '''Constructor for the CliParse class'''
@@ -34,7 +34,7 @@ class CliParse(object):
def main(self): def main(self):
'''main of CliParse class''' '''main of CliParse class'''
feed2tootepilog = 'For more information: https://feed2toot.readhthedocs.org' feed2tootepilog = 'For more information: https://feed2toot.readhthedocs.org'
feed2tootdescription = 'Take rss feed and send it to Mastodon' feed2tootdescription = 'Take rss feed and send it to Mastodon'
parser = ArgumentParser(prog='feed2toot', parser = ArgumentParser(prog='feed2toot',
description=feed2tootdescription, description=feed2tootdescription,
epilog=feed2tootepilog) epilog=feed2tootepilog)
@@ -63,7 +63,7 @@ class CliParse(object):
action='store_const', const='debug', default='warning', action='store_const', const='debug', default='warning',
help='enable debug output, work on log level DEBUG') help='enable debug output, work on log level DEBUG')
levels = [i for i in logging._nameToLevel.keys() levels = [i for i in logging._nameToLevel.keys()
if (type(i) == str and i != 'NOTSET')] if (type(i) == str and i != 'NOTSET')]
parser.add_argument('--syslog', nargs='?', default=None, parser.add_argument('--syslog', nargs='?', default=None,
type=str.upper, action='store', type=str.upper, action='store',
const='INFO', choices=levels, const='INFO', choices=levels,

View File

@@ -17,18 +17,17 @@
'''Get values of the configuration file''' '''Get values of the configuration file'''
# standard library imports # standard library imports
from configparser import SafeConfigParser, NoOptionError, NoSectionError from configparser import SafeConfigParser
import logging import logging
import os import os
import os.path import os.path
import socket
import sys import sys
import re import re
# 3rd party library imports # 3rd party library imports
import feedparser import feedparser
class ConfParse(object): class ConfParse:
'''ConfParse class''' '''ConfParse class'''
def __init__(self, clioptions): def __init__(self, clioptions):
'''Constructor of the ConfParse class''' '''Constructor of the ConfParse class'''
@@ -46,7 +45,6 @@ class ConfParse(object):
config = SafeConfigParser() config = SafeConfigParser()
if not config.read(os.path.expanduser(pathtoconfig)): if not config.read(os.path.expanduser(pathtoconfig)):
sys.exit('Could not read config file') sys.exit('Could not read config file')
# The feedparser section # The feedparser section
if config.has_option('feedparser', 'accept_bozo_exceptions'): if config.has_option('feedparser', 'accept_bozo_exceptions'):
self.accept_bozo_exceptions = config.getboolean('feedparser', 'accept_bozo_exceptions') self.accept_bozo_exceptions = config.getboolean('feedparser', 'accept_bozo_exceptions')
@@ -185,7 +183,7 @@ class ConfParse(object):
sys.exit('The parent directory of the cache file does not exist: {cachefileparent}'.format(cachefileparent=cachefileparent)) sys.exit('The parent directory of the cache file does not exist: {cachefileparent}'.format(cachefileparent=cachefileparent))
else: else:
options['cachefile'] = self.clioptions.cachefile options['cachefile'] = self.clioptions.cachefile
### cache limit # cache limit
if config.has_section(section): if config.has_section(section):
confoption = 'cache_limit' confoption = 'cache_limit'
if config.has_option(section, confoption): if config.has_option(section, confoption):
@@ -225,7 +223,7 @@ class ConfParse(object):
# host, port, user, pass, database options # host, port, user, pass, database options
########################################## ##########################################
plugins[section] = {} plugins[section] = {}
for currentoption in ['host','port','user','pass','database']: for currentoption in ['host', 'port', 'user', 'pass', 'database']:
if config.has_option(section, currentoption): if config.has_option(section, currentoption):
plugins[section][currentoption] = config.get(section, currentoption) plugins[section][currentoption] = config.get(section, currentoption)
if 'host' not in plugins[section]: if 'host' not in plugins[section]:
@@ -234,7 +232,7 @@ class ConfParse(object):
plugins[section]['port'] = 8086 plugins[section]['port'] = 8086
if 'measurement' not in plugins[section]: if 'measurement' not in plugins[section]:
plugins[section]['measurement'] = 'tweets' plugins[section]['measurement'] = 'tweets'
for field in ['user','pass','database']: for field in ['user', 'pass', 'database']:
if field not in plugins[section]: if field not in plugins[section]:
sys.exit('Parsing error for {field} in the [{section}] section: {field} is not defined'.format(field=field, section=section)) sys.exit('Parsing error for {field} in the [{section}] section: {field} is not defined'.format(field=field, section=section))

View File

@@ -38,9 +38,9 @@ class FeedCache:
with open(self.options['cachefile']) as dbdsc: with open(self.options['cachefile']) as dbdsc:
dbfromfile = dbdsc.readlines() dbfromfile = dbdsc.readlines()
dblist = [i.strip() for i in dbfromfile] dblist = [i.strip() for i in dbfromfile]
self.dbfeed = deque(dblist, self.options['cache_limit'] ) self.dbfeed = deque(dblist, self.options['cache_limit'])
else: else:
self.dbfeed = deque([], self.options['cache_limit'] ) self.dbfeed = deque([], self.options['cache_limit'])
def append(self, rssid): def append(self, rssid):
'''Append a rss id to the cache''' '''Append a rss id to the cache'''

View File

@@ -16,9 +16,6 @@
# Push values to a influxdb database # Push values to a influxdb database
'''Push values to a influxdb database''' '''Push values to a influxdb database'''
# standard libraries imports
import json
# 3rd party libraries imports # 3rd party libraries imports
from influxdb import InfluxDBClient from influxdb import InfluxDBClient
@@ -30,13 +27,16 @@ class InfluxdbPlugin(object):
self.data = data self.data = data
self.datatoinfluxdb = [] self.datatoinfluxdb = []
self.client = InfluxDBClient(self.plugininfo['host'], self.client = InfluxDBClient(self.plugininfo['host'],
self.plugininfo['port'], self.plugininfo['port'],
self.plugininfo['user'], self.plugininfo['user'],
self.plugininfo['pass'], self.plugininfo['pass'],
self.plugininfo['database']) self.plugininfo['database'])
self.main() self.main()
def main(self): def main(self):
'''Main of the PiwikModule class''' '''Main of the PiwikModule class'''
self.datatoinfluxdb.append({'measurement': self.plugininfo['measurement'], 'fields': {'value': self.data}}) self.datatoinfluxdb.append({
'measurement': self.plugininfo['measurement'],
'fields': {'value': self.data}
})
self.client.write_points(self.datatoinfluxdb) self.client.write_points(self.datatoinfluxdb)

View File

@@ -17,7 +17,7 @@
# Remove duplicates from the final string before sending the tweet # Remove duplicates from the final string before sending the tweet
'''Remove duplicates from the final string before sending the tweet''' '''Remove duplicates from the final string before sending the tweet'''
class RemoveDuplicates(object): class RemoveDuplicates:
'''Remove duplicates from the final string before sending the tweet''' '''Remove duplicates from the final string before sending the tweet'''
def __init__(self, tweet): def __init__(self, tweet):
'''Constructor of RemoveDuplicates class''' '''Constructor of RemoveDuplicates class'''
@@ -32,12 +32,12 @@ class RemoveDuplicates(object):
if element != ' ' and (element.startswith('http://') or element.startswith('https://')): if element != ' ' and (element.startswith('http://') or element.startswith('https://')):
newlink = True newlink = True
# if we already found this link, increment the counter # if we already found this link, increment the counter
for i,_ in enumerate(links): for i, _ in enumerate(links):
if links[i]['link'] == element: if links[i]['link'] == element:
newlink = False newlink = False
links[i]['count'] += 1 links[i]['count'] += 1
if newlink: if newlink:
links.append({'link': element, 'count': 1}) links.append({'link': element, 'count': 1})
# remove duplicates # remove duplicates
validatedlinks = [] validatedlinks = []
for i in range(len(links)): for i in range(len(links)):
@@ -45,14 +45,14 @@ class RemoveDuplicates(object):
validatedlinks.append(links[i]) validatedlinks.append(links[i])
wildcard = 'FEED2TOOTWILDCARD' wildcard = 'FEED2TOOTWILDCARD'
for element in validatedlinks: for element in validatedlinks:
for i in range(element['count']): for i in range(element['count']):
# needed for not inversing the order of links if it is a duplicate # needed for not inversing the order of links if it is a duplicate
# and the second link is not one # and the second link is not one
if i == 0: if i == 0:
self.tweet = self.tweet.replace(element['link'], wildcard, 1 ) self.tweet = self.tweet.replace(element['link'], wildcard, 1)
else: else:
self.tweet = self.tweet.replace(element['link'], '', 1) self.tweet = self.tweet.replace(element['link'], '', 1)
# finally # finally
self.tweet = self.tweet.replace(wildcard, element['link'], 1) self.tweet = self.tweet.replace(wildcard, element['link'], 1)
# remove all 2xspaces # remove all 2xspaces
self.tweet = self.tweet.replace(' ', ' ') self.tweet = self.tweet.replace(' ', ' ')