Python

From Torben's Wiki

Template

Standard Template

see header documentation example 1 and example 2; see also the Google Python Style Guide

#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
Here comes the docstring containing the description of this piece of software
""" 

# Built-in/Generic Imports
import os
import os.path  # os.path - The key to File I/O
import argparse
import configparser
import logging

# Libs
# import openpyxl

# Author and version info
__author__ = "Dr. Torben Menke"
__email__ = "https://entorb.net"
__maintainer__ = __author__
# __copyright__ = "Copyright 2020, My Project"
# __credits__ = ["John", "Jim", "Jack"]
__license__ = "GPL"
__status__ = "Dev"
__version__ = "0.1"


# Init Logging
# from [1]
# create logger
logger = logging.getLogger('myLogger')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
logger_fh = logging.FileHandler('log.log')
logger_fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
logger_ch = logging.StreamHandler()
logger_ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
# %(name)s = LoggerName, %(threadName)s = ThreadName
logger_formatter = logging.Formatter(
    '%(asctime)s - %(levelname)s - %(message)s')
logger_fh.setFormatter(logger_formatter)
logger_ch.setFormatter(logger_formatter)
# add the handlers to the logger
logger.addHandler(logger_fh)
logger.addHandler(logger_ch)
#
logger.debug('DebugMe')
logger.info('Starting')
logger.warning('Attention')
logger.error('Something went wrong')
logger.critical('Something seriously went wrong')


# Read Commandline Parameters
# construct the argument parser and parse the arguments
arg_parser = argparse.ArgumentParser()
# -h comes automatically
# Boolean Parameter
arg_parser.add_argument("-v", "--verbose", help="increase output verbosity",
                        action="store_true")  # store_true -> Boolean Value
#
args = vars(arg_parser.parse_args())
if args["verbose"]:
    logger_ch.setLevel(logging.DEBUG)


# Read config file
config = configparser.ConfigParser()
config.read('config.ini', encoding='utf-8')
# print(config.getfloat('Section1', 'Value1'))


# Dummy variables
s = "asdf"
l = ["a", "b", "c"]
d = {'keyx': 'valuex', 'keyy': 'valuey'}
d['keyz'] = 'valuez'


# File access
fileOut = "out/1/out.txt"
(filepath, fileName) = os.path.split(fileOut)
(fileBaseName, fileExtension) = os.path.splitext(fileName)
os.makedirs(filepath, exist_ok=True)

FILE = open(fileOut, "w")  # w = overWrite file ; a = append to file
FILE.write(s + "\n")
FILE.close()

fileIn = fileOut
FILE = open(fileIn, "r")
cont_list = (FILE.read()).split("\n")
FILE.close()

Object Oriented Template

class myDevice():
    def __init__(self, devicename: str = "", verbose: bool = False):
        # name of the device (e.g. for log messages)
        self.devicename = devicename
        self.verbose = verbose   # whether to log information or be quiet

    def log(self, msg: str):
        print(msg)

class SMU236(myDevice):
    def __init__(self, gpibaddress: float, devicename: str = "", verbose: bool = False):
        myDevice.__init__(self, devicename, verbose)
        self.gpibaddress = gpibaddress

if __name__ == "__main__":
    SMU = SMU236(1234)
    SMU.log("Starting")

method using self.x as default parameter

    def takeScreenshot(self, x=None):
        if x is None:
            x = self.windowsGeo['x']

Basics

Installing packages

python -m pip install --upgrade pip
pip install somemodule
or 
pip3 install somemodule
# using a web proxy
# set proxy for windows cmd session
SET HTTPS_PROXY=http://myProxy:8080
(afterwards the --proxy setting below is no longer required)
or
pip install --proxy http://myProxy:8080 somemodule

Naming Conventions

Google Python Style Guide:

module_name, package_name, ClassName, method_name, ExceptionName, function_name, GLOBAL_CONSTANT_NAME, global_var_name, instance_var_name, function_parameter_name, local_var_name
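
A minimal sketch illustrating these conventions (all names here are invented for illustration):

# module_name.py
GLOBAL_CONSTANT_NAME = 42
global_var_name = "module level value"

class ExceptionName(Exception):
    pass

class ClassName:
    def method_name(self, function_parameter_name):
        self.instance_var_name = function_parameter_name
        local_var_name = self.instance_var_name * 2
        return local_var_name

def function_name(function_parameter_name):
    return function_parameter_name + GLOBAL_CONSTANT_NAME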

See Category:Python for more stuff

Variables

del var     # delete / undef a variable
var = None  # sets to null
# check if variable is defined
if "xyz" in locals():
# for object oriented projects:
if "xyz" in self.__dict__.keys(): 

sleep for a while

import time
time.sleep(60)

wait for user input

input("press Enter to close")

Access global variables in functions

myVar = 123
def test():
    global myVar # point to global instead of creation of local var
    myVar = 321

Strings

# num <-> str
s = str(i)    # int to string
f = float(s)  # str -> float
i = int(s)
str(round(f, 1))  # round first
# tests
s.isdigit()  # 0-9
# note: isdecimal() also does not match '1.1'


modify string

text = text.strip() # trim spaces from both sides, rstrip for right only
text = text.lower() # lower case
text = text.upper() # upper case
text = text.title() # upper case for first char of word

import string
string.capwords(s) # upper case first letter of each word and also removes multiple and trailing spaces

text = input("Enter Text: ")  # get string from prompt
print("Good Morning!", end="")  # print without linebreak

text = text.replace(x, y)  # strings are immutable, so reassign

s.strip() # trim whitespaces from left and right
# replace all (multiple) whitespaces by single space ' '
# join strings
s = ' '.join(s.split())
# do not use s += s1 in a loop; join is faster
# key1=value1&key2=value2
param_str = "&".join("=".join(tup) for tup in d.items())

s * 5 # = s+s+s+s+s

substrings

# find a substring:
x in s
> True / False

if len(s) > 0:

# handling substrings
a = "abcd"
b = a[:1] + "o" + a[2:] 
> 'aocd'

myString="Hello there !bob@"
i1 = myString.find("!")+1
i2 = myString.find("@")
mySubString=myString[i1:i2]

def substr_between(s: str, s1: str, s2: str) -> str:
    assert s1 in s, f"E: can't find '{s1}' in '{s}'"
    assert s2 in s, f"E: can't find '{s2}' in '{s}'"
    i1 = s.find(s1) + len(s1)
    i2 = s.find(s2)
    assert i1 < i2, f"E: '{s1}' not before '{s2}' in '{s}'"
    return s[i1:i2]

Binary, formatted, raw strings

# Binary Strings
key = b'asdf'
# or 
key = str.encode('asdf')
s = key.decode('ascii')  # decode binary strings
key = s.encode('ascii')  # encode string to binary
# Formatted string
s = f''
# raw string
s = r'C:\Windows'  # no escape of \ needed (note: a raw string must not end with a single backslash)
# convert utf-8 to html umlaute
lk_name = "Nürnberg".encode('ascii', 'xmlcharrefreplace').decode()
# -> Nürnberg

merge variables in string

print ("Renner =", i)
print ("Renner = %3d" % i) # leading 0's
print (f"Renner = {i}")

# place formatted numbers in a string / sprintf
"The %03i %s cost %f euros" % (3, "beers", 11.50)
> 'The 3 beers cost 11.500000 euros'

"The length is %.2f meters" % 72.8958
>'The length is 72.90 meters'

p= "%.1f%%/min" % precent

Lists

like arrays in Perl

L = [1,2,3,4,5,6]
L = [x for x in range(10)]
L = "Word1 Word2 Word3".split() # split by spaces, like QW in Perl, use split(",") to split on ","
len(L)
L[0:10] # get elements 0-10
for a in L:
  ...

ATTENTION: assignment with = does not create a copy, only a new reference to the same list
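
A quick demonstration of the aliasing (names are examples only):

L = [1, 2, 3]
M = L          # M is just another name for the same list object
M.append(4)
print(L)       # -> [1, 2, 3, 4], L changed as well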

M = L       # M's elements are links to L's
# clones can be achieved via:
M = L.copy()  # clones L
M = L[:]    # clones L
M = list(L) # clones L
L.append(x) # append a single element
L.extend(M) # put elements of list M to the end of List L
L.insert(i, x) # insert item x at position int i
L.pop() # returns and removes the last item
L.pop(i) # returns and removes the item at position int i
L.reverse()
L = sorted(L, key=str.casefold)  # case insensitive / ignore case
L.remove(x) # removes the first occurrence of item x
L.count(x) # how many items x are in the list
L.index(x) # gives the position of the first x in list
s="".join(L)
x in L 
x not in L
# search for first match in list:
i = l_cont.index(1234)
line_footer = l_cont.index("")
# list to string
s = "\n".join(L)

# string to list
L = s.split("\n")

Initialize an "empty" list of a certain length, consisting of None elements:

l = [None] * 10

merge 2 lists to list of tuples

data = list(zip(data_x, data_y))

Cartesian product of lists / tuples

import itertools
for i in itertools.product(*listOfLists):
    print(i)

# remove duplicate values from list
myList = list(dict.fromkeys(myList))
# via set
myUniqueValues = set(myDict.values())


Looping over lists

modify each item in list by adding constant string

l = [s + ';' + v for v in l]

modify item in list

for idx, line in enumerate(cont):
  if "K1001/1" in line:
    line = "K1001/1 Test Nr " + str(i) + "\n"
    cont[idx] = line
    break

remove empty values from end

while L[-1] == "":
  # L = L[0:-1]
  L.pop()

modify or even remove certain items

# from https://stackoverflow.com/a/6024599
# iterates in-situ via index and reversing
for i in range(len(somelist) - 1, -1, -1):
    element = somelist[i]
    do_action(element)
    if check(element):
        del somelist[i]

Multi Dim Lists

lAllPoints = []
lAllPoints.append(['a', 'b', 'c'])
# or using tuple
lAllPoints.append(('a', 'b', 'c'))

sort multidim list

lAllPoints = sorted(lAllPoints, key=lambda x: x[0], reverse=False)
data_all = sorted(data_all, key=lambda row: (row['Wann'], row['Wer']), reverse=False)

Tuples

Ordered sequence, with no ability to replace or delete items

L = (1,2,3,4,5,6)
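
A short check of the immutability claim above:

try:
    L[0] = 99  # tuples do not support item assignment
except TypeError as e:
    print(e)   # 'tuple' object does not support item assignment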

list -> tuple

l = tuple(l)

combine 2 tuples

l = la + lb

Dictionaries

like hash in Perl

d = {'keyx': 'valuex', 'keyy': 'valuey'}
d['keyz'] = 'valuez'
d.keys()
['keyx', 'keyy', 'keyz']
del d['keyy']

len(d)
d.clear()
d.copy()
d.keys()
d.values()
d.items() # returns a list of tuples (key, value)
d.get(k) # returns value of key k
d.get(k, x) # returns value of key k; if k is not in d it returns x
d.pop(k) # returns and removes item k
d.pop(k, x) # returns and removes item k; if k is not in d it returns x
x in d
x not in d

# loop over all keys and retrieve their values as well
for key, value in d.items():
  print (f"{key} = {value}")

# sort by keys:
for userid in sorted(d.keys()):
# sort by values, reversed
for id, value in sorted(d.items(), key=lambda item: item[1], reverse=True):

MultiDim Dictionaries

dicProductivity = {} 
dicProductivity['Cursor'] = {}
dicProductivity['Cursor']['Nr'] = 1
dicProductivity['Cursor']['Prod'] = 1.909E18
dicProductivity['Cursor']['Cost'] = 0
dicProductivity['Cursor']['Img'] = 'templates/Shop01Cursor.png'
dicProductivity['Grandma'] = {}
dicProductivity['Grandma']['Nr'] = 2
dicProductivity['Grandma']['Prod'] = 1.725E18
dicProductivity['Grandma']['Cost'] = 0
dicProductivity['Grandma']['Img'] = 'templates/Shop02Grandma.png'

for k in dicProductivity.keys():
    print(k)
    if 'Img' in dicProductivity[k]:
        print("yes")

Alternatively one can use a tuple as dictionary key:

d = fetchDataAsDict()
myTuple = (d["description"], d["meaning"], d["source"], d["fileName"])
dict_with_tuple_as_key[myTuple] = value

Loops

for / while controls

break    = exit loop
continue = cancel current iteration and go to start of next iteration

ATTENTION: The loops do not create a new variable scope. Only functions and modules introduce a new scope!

for i in range (10):
    print(i)
del (i)
while i <= 100:
   i+=1
   ...
   if sth:
       break

for i in range(1, 5):
  print(i)
  if sth:
    continue

for f in L:

inline if (requires a dummy else):

print("something") if self.verbose else 0

methods/functions

example:
def get_labeled_exif(exif: dict) -> dict:
    """converts the exif key IDs into strings and returns that readable dict"""
    labeled = {}
    for (key, val) in exif.items():
        labeled[TAGS.get(key)] = val
    return labeled

Asserts for function argument validation

from Carsten Knoll's Python course

def eine_funktion(satz, ganzzahl, zahl2, liste):
  if not type(satz) == str:
    print("type error: satz")
    return -1
  if not isinstance(ganzzahl, int):
    print("type error: ganzzahl")
    return -2
  if not isinstance(liste, (tuple, list)):
    print("type error: liste")
    return -3
  # most compact variant (recommended):
  assert zahl2 > 0, "Error: zahl2 is not > 0"  # raises AssertionError if not fulfilled

def F(x):
  if not isinstance(x, (float, int)):
    msg = "expected a number, got %s" % type(x)
    raise ValueError(msg)
  return x**2

better:

def F(x):
  assert isinstance(x, (float, int)), "Error: x is not of type float or int"
  return x**2
assert variant in ['normal', 'gray', 'cannyedge'], "Error: variant is not in 'normal', 'gray', 'cannyedge'"

Imports

import sys
import datetime
import time
import math
import random
import os.path
# Import my files
import MyFile  # without trailing .py
# import a file that is not stored in the same folder
import sys
sys.path.append("../libs")  # append the folder, then import MyFile as usual

Math

see Python - Math for linear regression

Python 2: get rid of the annoying integer division: [1]

from __future__ import division

Modulo

15 % 4
--> 3
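
A small check, including negative numbers (in Python the result takes the sign of the divisor):

print(15 % 4)         # 3
print(-15 % 4)        # 1 (not -3 as in C)
print(divmod(15, 4))  # (3, 3) = (quotient, remainder) in one call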

Random

import random
random.randint(1000000, 9999999)

Date and Time

from datetime import date, datetime, timedelta
date_a = date.fromisoformat('2020-03-10')
date_today = date.today()
date_yesterday = date.today() - timedelta(days=1)
dt_today = datetime.today()

[2]
parsing iso dates

from datetime import datetime
dt = datetime.fromtimestamp(myTimestamp)
dt = datetime.fromisoformat('2017-01-01T12:30:59.000000')
dt = datetime.fromisoformat(s[:-1])  # remove trailing "Z" from string s first
dt = datetime.fromisoformat('2020-03-10 06:01:01+00:00')
import datetime
d = datetime.date.today().strftime("%y%m%d")
d = datetime.datetime.today().strftime("%y%m%d-%H%M")
# now in UTC
d = datetime.datetime.now(datetime.timezone.utc)
# now in UTC without milliseconds
d = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'


rounding datetime

from datetime import datetime, timedelta
def floor_dt_minutes(dt: datetime, res: int = 5) -> datetime:
    """ floor (=round down) minutes to X min resolution """
    minNew = res * (dt.minute // res)
    return dt.replace(minute=minNew, second=0, microsecond=0)

def ceil_dt_minutes(dt: datetime, res: int = 5) -> datetime:
    """ ceil (=round up) minutes to X min resolution """
    minNew = res * (1 + dt.minute // res)
    return dt.replace(minute=0, second=0, microsecond=0) + \
        timedelta(minutes=minNew)

def round_dt_minutes(dt: datetime, res: int = 5) -> datetime:
    """ round minutes to X min resolution """
    minOldDec = float(dt.minute) + float(dt.second)/60
    minNew = res * round(minOldDec / res)
    return dt.replace(minute=0, second=0, microsecond=0) + \
        timedelta(minutes=minNew)

dt = datetime.fromisoformat('2020-03-10 06:01:01+00:00')
print(f"original: {dt}")
print(f"floored: {floor_dt_minutes(dt,5)}")
print(f"ceileded: {ceil_dt_minutes(dt,5)}")
print(f"rounded: {round_dt_minutes(dt,5)}")

timing

measure time elapsed

import time
timestart = time.time()
...
print(time.time() - timestart)

calculate time

import time
duration = 1234  # sec
print("ETA =", time.ctime(time.time() + duration))
array = time.localtime(time.time() + duration)

Exceptions

Catch keyboard interrupt and do a "safe exit"

try:
  FILE = open("out.txt", "w")
  while 1:
    i += 1
    print(i)
except KeyboardInterrupt:
  FILE.close()

Catch all exceptions

try:
  [...]
except Exception as e:
  print("Exception raised:", e)

Custom Exceptions

try:
  raise Exception('HiHo')
except Exception as e:
  print(e)

Math: curve fitting

from [3]

import numpy as np
# curve-fit() function imported from scipy
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt

# Test function with coefficients as parameters
def fit_function(x, a, b):
    return a * np.exp(b * x)

p0 = [data_y[-1], 0.14]  # initial guess of parameters
param, param_cov = curve_fit( fit_function, data_x, data_y, p0, bounds=((-np.inf, -np.inf), (np.inf, np.inf)) )

print(f"Coefficients:\n{param}")
print(f"Covariance of coefficients:\n{param_cov}")

data_y_fit = []
for x in data_x:
    y = fit_function(x, param[0], param[1])
    data_y_fit.append(y)
plt.plot(data_x, data_y, 'o', color='red', label="data")
plt.plot(data_x, data_y_fit, '--', color='blue', label="fit")
plt.legend()
plt.show()

Regular Expressions

See [4]

See [5] for an online tester

matching

import re

# V0: simple 1
myPattern = '(/\*\*\* 0097_210000_0192539580000_2898977_0050 \*\*\*/.*?)($|/\*\*\*)'
myRegExp = re.compile(myPattern, re.DOTALL)
myMatch = myRegExp.search(cont)
assert myMatch != None, f"golden file not found in file {filename}"
cont_golden = myMatch.group(1)

# V1: simple 2
assert re.match('^[a-z]{2}$', d_settings['country']) != None, f"Error: country must be 2 digit lower case. We got: {d_settings['country']}"

Match email

def checkValidEMail(email: str) -> bool:
    # from https://stackoverflow.com/posts/719543/timeline bottom edit
    if not re.fullmatch(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$", email):
        print("Error: invalid email")
        quit()
    return True

substring

import re

# simple via search
lk_id = re.search('^.*timeseries\-(\d+)\.json$', f).group(1)

# simple via sub
myPattern = '^.*' + s1 + '(.*)' + s2 + '.*$'
out = re.sub(myPattern, r'\1', s)

# more robust including an assert
def substr_between(s: str, s1: str, s2: str) -> str:
    """
    returns substring of s, found between strings s1 and s2
    s1 and s2 can be regular expressions
    """
    myPattern = s1 + '(.*)' + s2
    myRegExp = re.compile(myPattern)
    myMatches = myRegExp.search(s)
    assert myMatches != None, f"E: can't find '{s1}'...'{s2}' in '{s}'"
    out = myMatches.group(1)
    return out
matchObj = re.search(r'(\d+\.\d+)', text)
if matchObj:
  price = float(matchObj.group(0))

Naming of match groups

(?P<name>...), see [6]
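
A small example of named groups (pattern and text are made up):

import re
m = re.search(r'(?P<key>\w+)=(?P<value>\w+)', 'foo=bar')
if m:
    print(m.group('key'))    # foo
    print(m.group('value'))  # bar
    print(m.groupdict())     # {'key': 'foo', 'value': 'bar'}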

Search and Replace

From [7]
re.sub(regex, replacement, str) performs a search-and-replace on str, replacing all matches of regex with replacement. The result is returned by the sub() function; the str you pass in is not modified.

s = re.sub("  +", " ", s)

Splitting

From [8]
split() splits a string into a list delimited by the passed pattern. The method is invaluable for converting textual data into data structures that can be easily read and modified by Python as demonstrated in the following example that creates a phonebook.

First, here is the input. Normally it may come from a file, here we are using triple-quoted string syntax:

>>> input = """Ross McFluff: 834.345.1254 155 Elm Street
...
... Ronald Heathmore: 892.345.3428 436 Finley Avenue
... Frank Burger: 925.541.7625 662 South Dogwood Way
...
...
... Heather Albrecht: 548.326.4584 919 Park Place"""

The entries are separated by one or more newlines. Now we convert the string into a list with each nonempty line having its own entry:

>>> entries = re.split("\n+", input)
>>> entries
['Ross McFluff: 834.345.1254 155 Elm Street',
'Ronald Heathmore: 892.345.3428 436 Finley Avenue',
'Frank Burger: 925.541.7625 662 South Dogwood Way',
'Heather Albrecht: 548.326.4584 919 Park Place']

Finally, split each entry into a list with first name, last name, telephone number, and address. We use the maxsplit parameter of split() because the address has spaces, our splitting pattern, in it:

>>> [re.split(":? ", entry, 3) for entry in entries]
[['Ross', 'McFluff', '834.345.1254', '155 Elm Street'],
['Ronald', 'Heathmore', '892.345.3428', '436 Finley Avenue'],
['Frank', 'Burger', '925.541.7625', '662 South Dogwood Way'],
['Heather', 'Albrecht', '548.326.4584', '919 Park Place']]

perl grep and map

from [9]

# renamed arguments/function to avoid shadowing the built-ins list and map
def grep(lst, pattern):
    expr = re.compile(pattern)
    return [elem for elem in lst if expr.match(elem)]

def map_re(lst, was, womit):
    return list(map(lambda i: re.sub(was, womit, i), lst))
    # was   = '.*"(\d+)".*'
    # womit = r'\1'

File Access

Basic

Split path into folder, filename, ext

import os
(dirName, fileName) = os.path.split(f)
(fileBaseName, fileExtension)=os.path.splitext(fileName)
fileOut = os.path.splitext(fileIn)[0] + "-out.txt"

Checking Operating System

import os
import sys
if os.name == 'posix':
  print ('posix/Unix/Linux')
elif os.name == 'nt':
  print ('windows')
else:
  print ('unknown os')
  sys.exit(1) # throws exception, use quit() to close / die silently

Accessing OS environment variables

import os
print(os.getenv('tmp'))

Get filename of python script

from sys import argv
myFilename = argv[0]

Command Line Arguments

Read filename from commandline parameter

import sys
for filename in sys.argv:

ArgumentParser

import argparse
parser = argparse.ArgumentParser()  # construct the argument parser and parse the arguments
# -h comes automatically

# Boolean Parameter
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")  # store_true -> Boolean Value

# Choice Parameter
# restrict to a list of possible values / choices
# parser.add_argument("--choice", type=int, choices=[0, 1, 2], help="Test choices")

# Positional Parameter (like text.py 123)
# parser.add_argument("num", type=int, help="Number of things")

# Required Parameter
# parser.add_argument("-i", "--input", type=str, required=True, help="Path of input file")

# Optional Parameter
parser.add_argument("-n", "--number", type=int, help="Number of clicks")
# Optional Parameter with Default
parser.add_argument("-s", "--seconds", type=int, default=secDefault, help="Duration of clicking, default = %i (sec)" % secDefault)

args = vars(parser.parse_args())

if args["verbose"]:
    pass # do nothing
   # print ("verbosity turned on") 
if args["number"]:
    print("num=%i" % args["number"])

Run external program

Simple single process

import subprocess
process = subprocess.run(["sudo", "du", "--max-depth=1", mydir], capture_output=True, text=True)
print (process.stdout)

old, deprecated way:

os.system( "gnuplot " + gpfile)

Multiprocessing

import subprocess
import time

l_subprocesses = []  # queue list of subprocesses
max_processes = 4

def process_enqueue(new_process_parameters):
    global l_subprocesses
    # wait for free slot
    while len(l_subprocesses) >= max_processes:
        process_remove_finished_from_queue()
        time.sleep(0.1)  # sleep 0.1s
    process = subprocess.Popen(new_process_parameters,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               universal_newlines=True)
    l_subprocesses.append(process)

def process_remove_finished_from_queue():
    global l_subprocesses
    i = 0
    while i <= len(l_subprocesses) - 1:
        process = l_subprocesses[i]
        if process.poll() is not None:  # has already finished
            process_print_output(process)
            l_subprocesses.pop(i)
        else:  # still running
            i += 1

def process_print_output(process):
    """waits for process to finish and prints process output"""
    stdout, stderr = process.communicate()
    if stdout != "":
        print(f'Out: {stdout}')
    if stderr != "":
        print(f'ERROR: {stderr}')

def process_wait_for_all_finished():
    global l_subprocesses
    for process in l_subprocesses:
        process_print_output(process)
    l_subprocesses = []  # empty list of done subprocesses

process_enqueue(l_parameters1)
...
process_enqueue(l_parameters999)
process_wait_for_all_finished()

File Modifications

Copy File

import shutil
shutil.copyfile(fileTemp,
                os.path.join(dest_path, fileOut))

Move/Rename file

os.rename(fileIn, fileDone)

Delete file

os.remove("file.txt")

Touch File

if os.path.exists(fname):
  os.utime(fname, None)
else:
  open(fname, 'w').close()

File Meta Data

Get file size

import os
os.path.getsize("moinsen.txt")  # size in bytes, already an int

Read Timestamp (last modified)

lasttime = os.path.getmtime(fname)

Directories / Folders

Cross platform paths

currentdir = os.curdir
mysubdir = os.path.join(currentdir, "mysubdir")

Create Dir

os.makedirs(dir, exist_ok=True) # recursively: with all parents
# or
if not os.path.isdir(dir) :
  os.mkdir(dir)

Delete folder+contents

import shutil
shutil.rmtree(d)

Fetch Dir Contents / Loop over Files

Simple

Get list of files in directory, filter dirs from list, filter by ext

dir= "/path/to/some/dir"
listoffiles = [ f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir ,f)) and f.lower()[-4:] == ".gpx"]
listoffiles.sort()

alternative loop via glob

import glob, os
os.chdir("/mydir")
for f in glob.glob("*.txt"):
    print(f)

or simply os.listdir:

import os
for f in os.listdir("/mydir"):
    if f.endswith(".txt"):
        print(os.path.join("/mydir", f))

new in 3.5: scandir

for f in os.scandir('./'):
    if f.is_file():
        (filename, fileext) = os.path.splitext(f.name)

Traverse in Subdirs

# walk into path and fetch all files matching extension jpe?g
files = []
for (dirpath, dirnames, filenames) in os.walk('.'):
    dirpath = dirpath.replace('\\', '/')
    for file in filenames:
        if file.endswith(".txt"):
            files.append(dirpath+'/'+file)
        elif re.search(r'\.jpe?g$', file, re.IGNORECASE):
            files.append(dirpath+'/'+file)

File Parsing

File General

File Read

Check if file / dir exists

import os.path  # os.path - The key to File I/O
os.path.exists("text.txt")
os.path.isfile("text.txt")
os.path.isdir("text")
os.path.isabs("/home/torben/text.txt") # Is it an absolute path
with open(cache_file, mode='r', encoding='utf-8') as fh:
  cont = fh.read()
  # or
  lines = fh.readlines()
  # or
  line =  fh.readline()
  # or
  for line in fh:
     print(line)

fh = open(filename, mode='r', encoding='utf-8')
...
fh.close()

File Write

fileOut= "out/1/out.txt"
(filepath, fileName) = os.path.split(fileOut)
# (fileBaseName, fileExtension) = os.path.splitext(fileName)
os.makedirs(filepath, exist_ok=True) # = mkdir -p

with open(fileOut, mode='w', encoding='utf-8', newline='\n') as fh:
  # w = overWrite file ; a = append to file
  # If running Python in Windows, "\n" is automatically replaced by "\r\n". To prevent this use newline='\n'
  fh.writelines(lines)  # writelines adds no linebreaks
  # or
  fh.write('\n'.join(lines))
  # or
  for line in lines:
    fh.write(line)
  
  # Force update of filecontents without closing it
  fh.flush()

# alternative
fh = open(fileOut, mode='w', encoding='utf-8', newline='\n') 
...
fh.close()

INI Config File Reading

Config.ini

[Section1]
Cursor         = 205E18
Grandma        =  18E18
Farm           =  11E18
Mine           = 514E18
Factory        = 155E18

test.py

from configparser import ConfigParser
config = ConfigParser(interpolation=None) # interpolation=None -> treats % in values as char % instead of interpreting it
config.read('Config.ini', encoding='utf-8')

print(config.getint('Section1', 'key1'))
print(config.getfloat('Section1', 'key2'))
print(config.get('Section1', 'key3'))

for sec in config.sections():
  d_settings = {}
  for key in config.options(sec):
    value = config.get(sec, key)
    d_settings[key] = value
    print ("%15s : %s"  % (key, value))

CSV

CSV Read

import csv
with open('data/ref_selected_countries.csv', mode='r', encoding='utf-8') as fh:
    csv_reader = csv.DictReader(fh, dialect='excel', delimiter="\t")
    for row in csv_reader:
        print(f'\t{row["name"]} works in the {row["department"]} department')

CSV Write

plain writing

with open('data.tsv', mode='w', encoding='utf-8', newline='\n') as fh:
    csvwriter = csv.writer(fh, delimiter="\t")
    csvwriter.writerow(  
        ('Date', 'Confirmed')
    )

list of dicts writing

with open(filename+'.tsv', mode='w', encoding='utf-8', newline='\n') as fh:
    csvwriter = csv.DictWriter(fh, delimiter='\t', extrasaction='ignore', fieldnames=[
                              'date', 'occupied_percent', 'occupied', 'total'])
    csvwriter.writeheader()
    for d in myList:
        d['occupied_percent'] = round(100*d['occupied'] / d['total'], 1)
        csvwriter.writerow(d)

JSON

JSON Read

with open(download_file, mode='r', encoding='utf-8') as fh:
    d_json = json.load(fh)

JSON Write

Write dict to file in JSON format, keeping utf-8 encoding

with open('my_file.json', mode='w', encoding='utf-8', newline='\n') as fh:
    json.dump(my_dict, fh, ensure_ascii=False, sort_keys=True)


Excel

Excel Read

import openpyxl
workbook = openpyxl.load_workbook(pathToMyExcelFile, data_only=True)  # data_only : read values instead of formulas
sheet = workbook['mySheetName']
# or fetch active sheet
sheet = workbook.active
cell = sheet['A34']
# or
cell = sheet.cell(row=34, column=1)  # index starts here with 1
print (cell.value)
or
print(sheet.cell(column=col, row=row).value)

Excel Write

import openpyxl
workbookOut = openpyxl.Workbook()
sheetOut = workbookOut.active
cellOut = sheetOut['A34']
# or
cellOut = sheetOut.cell(row=34, column=1)  # index starts here with 1
cellOut.value = 'asdf'
workbookOut.save('out.xlsx')

Image/Picture/Photo Resize and Exif Modifying

from PIL import Image, ImageFilter  # pip install Pillow
import os, sys

fileIn = '2018-02-09 13.56.25.jpg'
# Read image
img = Image.open( fileIn )

PROBLEM:
PIL Image.save() drops IPTC data like tags, keywords, copyright, ...
better to use https://imagemagick.org instead if the tags shall be kept
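
A possible sketch using ImageMagick via subprocess instead (assuming the classic convert binary is installed; ImageMagick 7 uses magick). ImageMagick keeps the metadata unless -strip is given; the output filename here is just an example:

import os
import subprocess
fileIn = '2018-02-09 13.56.25.jpg'
fileOut = os.path.splitext(fileIn)[0] + "-resized.jpg"
# resize to max 1920px on the longer side, ">" = only shrink, never enlarge
subprocess.run(["convert", fileIn, "-resize", "1920x1920>", fileOut], check=True)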

Resize

# Resize keeping aspect ratio -> img.thumbnail
# drops exif data; exif can be added from the source file via exif= in save, see below
size = 1920, 1920
img.thumbnail(size, Image.ANTIALIAS)

Export file

fileOut = os.path.splitext(fileIn)[0] + "-edit.jpg"
try:
    img = Image.open(fileIn)
    img.save(fp=fileOut, format="JPEG", quality='keep')  # exif=dict_exif_bytes
    # JPEG Parameters
    # * quality : 'keep' or 1 (worst) to 95 (best), default = 75. Values above 95 should be avoided.
    # * dpi : tuple of integers representing the pixel density, (x,y)
except IOError:
    print("cannot write file '%s'" % fileOut)

Export Progressive / web optimized JPEG

from PIL import ImageFile  # for MAXBLOCK for progressive export
fileOut = os.path.splitext(fileIn)[0] + "-progressive.jpg"
try:
    img.save(fp=fileOut, format="JPEG", quality=80, optimize=True, progressive=True)
except IOError:
    ImageFile.MAXBLOCK = img.size[0] * img.size[1]
    img.save(fp=fileOut, format="JPEG", quality=80, optimize=True, progressive=True)

JPEG Meta Data: EXIF and IPTC

IPTC: Tags/Keywords

from iptcinfo3 import IPTCInfo  # this works in python 3!
iptc = IPTCInfo(fileIn)
if len(iptc['keywords']) > 0:  # or supplementalCategories or contacts
    print('====> Keywords')
    for key in sorted(iptc['keywords']):
        s = key.decode('ascii')  # decode binary strings
        print(s)
EXIF via piexif

import piexif  # pip install piexif
exif_dict = piexif.load(img.info['exif'])
print(exif_dict['GPS'][piexif.GPSIFD.GPSAltitude])
# returns list of 2 integers: value and denominator -> v / d
# (340000, 1000) => 340m
# (51, 2) => 25.5m

# Modify altitude
exif_dict['GPS'][piexif.GPSIFD.GPSAltitude] = (140, 1)  # 140m

# write to file
exif_bytes = piexif.dump(exif_dict)
fileOut = os.path.splitext(fileIn)[0] + "-modExif.jpg"
try:
    img.save(fp=fileOut, format="jpeg", exif=exif_bytes, quality='keep')
except IOError:
    print("cannot write file '%s'" % fileOut)

or

exif_dict = piexif.load(fileIn)
for ifd in ("0th", "Exif", "GPS", "1st"):
    print("===" + ifd)
    for tag in exif_dict[ifd]:
        print(piexif.TAGS[ifd][tag]["name"], "\t",
              tag, "\t", exif_dict[ifd][tag])
print(exif_dict['0th'][306])  # 306 = DateTime

EXIF via exifread

import exifread  # pip install exifread
# Open image file for reading (binary mode)
fh = open(fileIn, 'rb')
# Return Exif tags
exif = exifread.process_file(fh)
fh.close()
# for tag in exif.keys():
#     if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename', 'EXIF MakerNote'):
#         print("%s\t%s" % (tag, exif[tag]))
print(exif['Image DateTime'])
print(exif['GPS GPSLatitude'])
print(exif['GPS GPSLongitude'])
EXIF GPS via PIL

from PIL.ExifTags import TAGS, GPSTAGS
# from https://developer.here.com/blog/getting-started-with-geocoding-exif-image-metadata-in-python3
def get_exif(filename):
    image = Image.open(filename)
    image.verify()
    image.close()
    return image._getexif()


def get_labeled_exif(exif):
    labeled = {}
    for (key, val) in exif.items():
        labeled[TAGS.get(key)] = val
    return labeled

def get_geotagging(exif):
    if not exif:
        raise ValueError("No EXIF metadata found")
    geotagging = {}
    for (idx, tag) in TAGS.items():
        if tag == 'GPSInfo':
            if idx not in exif:
                raise ValueError("No EXIF geotagging found")
            for (key, val) in GPSTAGS.items():
                if key in exif[idx]:
                    geotagging[val] = exif[idx][key]
    return geotagging


def get_decimal_from_dms(dms, ref):
    degrees = dms[0][0] / dms[0][1]
    minutes = dms[1][0] / dms[1][1] / 60.0
    seconds = dms[2][0] / dms[2][1] / 3600.0
    if ref in ['S', 'W']:
        degrees = -degrees
        minutes = -minutes
        seconds = -seconds
    return round(degrees + minutes + seconds, 5)


def get_coordinates(geotags):
    lat = get_decimal_from_dms(
        geotags['GPSLatitude'], geotags['GPSLatitudeRef'])
    lon = get_decimal_from_dms(
        geotags['GPSLongitude'], geotags['GPSLongitudeRef'])
    return (lat, lon)


exif = get_exif(fileIn)
exif_labeled = get_labeled_exif(exif)
print(exif_labeled['DateTime'])

geotags = get_geotagging(exif)
print(get_coordinates(geotags))

Template Matching

see Python - CV2

Optical Character Recognition (OCR)

see Python - OCR

GPX parsing

import gpxpy
import gpxpy.gpx
# Elevation data by NASA: see lib at https://github.com/tkrajina/srtm.py
fh_gpx_file = open(gpx_file_path, 'r')
gpx = gpxpy.parse(fh_gpx_file)
#  Loops for accessing the data
for track in gpx.tracks:
    for segment in track.segments:
        for point in segment.points:
for waypoint in gpx.waypoints:
for route in gpx.routes:
    for point in route.points: 
# interesting properties of point / waypoint objects:
point.time
point.latitude
point.longitude
point.source
waypoint.name

Templates/Snippets

Logging

V2: File and STDOUT

import logging
# from [10]
# create logger
logger = logging.getLogger('myLogger')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('mylog.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')  # %(name)s = LoggerName, %(threadName)s = ThreadName
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)  

logger.debug('DebugMe')
logger.info('Starting')
logger.warning('Attention')
logger.error('Something went wrong')
logger.critical('Something seriously went wrong')

V1: STDOUT

import logging
# Logging is nicer than print, as it can automatically add the threadname
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(threadName)s: %(message)s',
                    )
logging.info('Starting')

Multi Threading

see Python - Multithreading

Hexadecimal

see Python - Hex


Compile to .exe

pip install pyinstaller
pyinstaller --onefile --console your.py

(Python - py2exe is deprecated)

Cryptography and Hashing

Hashing via SHA256

import hashlib

def gen_SHA256_string(s: str) -> str:
    m = hashlib.sha256()
    m.update(s.encode('ascii'))
    return m.hexdigest()

Hashing via MD5

(MD5 is not secure, better use SHA256)

def gen_MD5_string(s: str) -> str:
    m = hashlib.md5()
    m.update(s.encode('ascii'))
    return m.hexdigest()

Password hashing via bcrypt

import bcrypt
pwd = 'geheim'
pwd = pwd.encode("utf-8")
# or 
pwd = b'geheim'

hashed = bcrypt.hashpw(pwd, bcrypt.gensalt())
if bcrypt.checkpw(pwd, hashed):
    print("It Matches!")
    print(hashed.decode("utf-8"))

To use version 2a instead of 2b (default):

bcrypt.gensalt(prefix=b"2a")

Progress Bar

see tqdm

from tqdm import tqdm
for i in tqdm(range(10000)):
    ....

CGI Web development

# Print necessary headers.
print("Content-Type: text/html")
print()

# errors and debugging info to browser
import cgitb
cgitb.enable()


Access URL or Form Parameters

# V2 from https://www.tutorialspoint.com/python/python_cgi_programming.htm
import cgi
form = cgi.FieldStorage()
username = form.getvalue('username')
print(username)
# V1
import os
import urllib.parse
query = os.environ.get('QUERY_STRING')
query = urllib.parse.unquote(query, errors="surrogateescape")
d = dict(qc.split("=") for qc in query.split("&"))
print(d)
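
A more robust variant (sketch) is urllib.parse.parse_qs, which also handles repeated and missing parameters:

import os
import urllib.parse
query = os.environ.get('QUERY_STRING', '')
d = urllib.parse.parse_qs(query)  # values are lists, e.g. {'username': ['torben']}
print(d)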

CGI Backend Returning JSONs

#!/usr/local/bin/python3.6
# -*- coding: utf-8 -*-

import cgi
import json

# Print necessary headers.
print("Content-type: application/json")
print()

form = cgi.FieldStorage()

def get_form_parameter(para: str) -> str:
    "asserts that a given parameter is set and returns its value"
    value = form.getvalue(para)
    assert value, f"Error: parameter {para} missing"
    assert value != "", f"Error: parameter {para} missing"
    return value
 
response = {}
response['status'] = "ok"

try:
    action = get_form_parameter("action")
    response['action'] = action
    if action == "myAction":
        ...

except Exception as e:
    response['status'] = "error"
    d = {"type": str(type(e)), "text": str(e)}
    response["exception"] = d

finally:
    print(json.dumps(response))

Pandas

import pandas as pd

# create
df = pd.DataFrame()
df = pd.DataFrame(data={'Deaths_Covid_2020': l})
df = pd.read_csv('data/de-states/de-state-DE-total.tsv', sep="\t")

# create from lists and calculate rolling average, min, max columns
data = zip(l_dates, l_deaths2016, l_deaths2017,
           l_deaths2018, l_deaths2019, l_deaths2020)
df = pd.DataFrame(data, columns=['Day', '2016', '2017',
                                 '2018', '2019', '2020'])
df['2016_roll'] = df['2016'].rolling(window=7, min_periods=1).mean().round(1)
df['2017_roll'] = df['2017'].rolling(window=7, min_periods=1).mean().round(1)
df['2018_roll'] = df['2018'].rolling(window=7, min_periods=1).mean().round(1)
df['2019_roll'] = df['2019'].rolling(window=7, min_periods=1).mean().round(1)
df['2020_roll'] = df['2020'].rolling(window=7, min_periods=1).mean().round(1)
# mean value of 4 columns
df['2016_2019_mean'] = df.iloc[:, [1, 2, 3, 4]
                               ].mean(axis=1)  # not column 0 = day
df['2016_2019_mean_roll'] = df['2016_2019_mean'].rolling(
    window=7, min_periods=1).mean().round(1)

df['2016_2019_roll_max'] = df.iloc[:, [6, 7, 8, 9]].max(axis=1)
df['2016_2019_roll_min'] = df.iloc[:, [6, 7, 8, 9]].min(axis=1)

# prepend empty values to df
# Jan und Feb values are missing for Covid Deaths series, so I need a couple of empty rows
l = [None] * 59
df1 = pd.DataFrame(data={'Deaths_Covid_2020': l})
# append df to df 
df_covid_2020 = pd.DataFrame()
df_covid_2020['Deaths_Covid_2020'] = df1['Deaths_Covid_2020'].append(
    df2['Deaths_Covid_2020'], ignore_index=True)

# ensure first row is from 28.2
assert (df2.iloc[0]['Date'] ==
        '2020-02-28'), "Error of start date, expecting 2020-02-28"

# copy
df2['Date'] = df0['Date']

# drop 2 rows from the beginning
df2.drop([0, 1], inplace=True)

Databases

PostgreSQL

Basics

import psycopg2
import psycopg2.extras

credentials= {'host': 'localhost', 'port': 5432, 'database': 'myDB', 'user': 'myUser', 'password': 'myPwd'}
connection = psycopg2.connect(**credentials)
cursor = connection.cursor(cursor_factory=psycopg2.extras.DictCursor)

l_bind_vars = ['valueA', 'valueB']  # one entry per %s placeholder

sql = """
SELECT * FROM myTable
WHERE 1=1 
AND status NOT IN ('CLOSED') 
AND ColA = %s 
AND ColB = %s 
ORDER BY created DESC
"""
cursor.execute(sql, l_bind_vars )
d_data = dict(cursor.fetchone())

export result to csv file

sql1 = "SELECT * FROM table"
sql2 = "COPY (" + sql1 + ") TO STDOUT WITH CSV HEADER DELIMITER '\t'"
        with open("out.csv", "w") as file:
            cursor.copy_expert(sql2, file)

ReadConfigFile to connect to PostgreSQL

from [11]

database.ini

[postgresql]
host=dbhost
port=5432
database=dbname
user=dbuser
password=dbpass

config.py

from configparser import ConfigParser
def config(filename='database.ini', section='postgresql'):
   parser = ConfigParser()
   parser.read(filename)
   # get section, default to postgresql
   db = {}
   if parser.has_section(section):
       params = parser.items(section)
       for param in params:
           db[param[0]] = param[1]
   else:
       raise Exception(
           'Section {0} not found in the {1} file'.format(section, filename))
   return db

main.py

import psycopg2
from config import config 

def connect():
   """ Connect to the PostgreSQL database server """
   conn = None
   try:
       params = config() # read connection parameters
       print('Connecting to the PostgreSQL database...')
       conn = psycopg2.connect(**params)
       cur = conn.cursor() # create a cursor
       print('PostgreSQL database version:')
       cur.execute('SELECT version()')  # execute a statement
       db_version = cur.fetchone()
       print(db_version)
       cur.close()
   except (Exception, psycopg2.DatabaseError) as error:
       print(error)
   finally:
       if conn is not None:
           conn.close()
           print('Database connection closed.')

if __name__ == '__main__':
   connect()

SQL Lite / SQLite

see page SQLite

Internet Access

Send E-Mails

see Python - eMail


Download file

in Python 3 this is the preferred way of downloading files:

import urllib.request
url = "https://pomber.github.io/covid19/timeseries.json"
filedata = urllib.request.urlopen(url)
datatowrite = filedata.read()
with open('test.json', 'wb') as fh:
    fh.write(datatowrite)

Download HTML and extract elements from table using xpath

from lxml import html
import requests
page = requests.get(url)
tree = html.fromstring(page.content)
tbody_trs = tree.xpath('//*/tbody/tr')
l_rows = []
for tr in tbody_trs:
    l_columns = []
    if len(tr) != 15:
        continue
    for td in tr:
        l_columns.append(td.text_content())
    l_rows.append(l_columns)  # one row per tr, appended after the cell loop

Call Rest API

import requests

def perform_rest_call_str(url: str) -> str:
    resp = requests.get(url)
    if resp.status_code != 200:
        raise Exception(
            f'E: bad response. status code:{resp.status_code}, text:\n{resp.text}')
    return resp.text
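
A possible usage sketch, parsing the response as JSON (URL taken from the download example above):

import json
cont = perform_rest_call_str("https://pomber.github.io/covid19/timeseries.json")
d_json = json.loads(cont)
print(len(d_json))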

GUI Interactions

Take Screenshot

import numpy as np
import pyautogui  # (c:\Python\Scripts\)pip install pyautogui
# pyautogui only supports screenshots on monitor #1
...
screenshot = pyautogui.screenshot()
# screenshot = pyautogui.screenshot(region=(screenshotX, screenshotY, screenshotW, screenshotH))
screenshot = np.array(screenshot)
# Convert RGB to BGR
screenshot = screenshot[:, :, ::-1].copy()

Mouse Actions

def clickIt(x, y, key=""):
  x0, y0 = pyautogui.position()
  if key != "":  # ctrl, shift
    pyautogui.keyDown(key)
  pyautogui.moveTo(x, y, duration=0.2)
  pyautogui.click(x=x, y=y, button='left', clicks=1, interval=0.1)
  if key != "":  # ctrl, shift
    pyautogui.keyUp(key)
  pyautogui.moveTo(x0, y0)

Web Automation

from selenium import webdriver
from selenium.webdriver.common.keys import Keys

# from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options

import os
import time
import glob

class StravaUserMapDL():
    def __init__(self):
        self.driver = webdriver.Firefox()

    def login(self):
        driver = self.driver
        url = "https://www.somewebpage.com"
        email = "myemail"
        password = "mypassword"
        driver.get(url)

        title = driver.title
        urlIs = driver.current_url
        cont = driver.page_source #  as string
        FILE = open(filename,"w") # w = overWrite file ; a = append to file
        FILE.write(cont)
        FILE.close()         

        # handle login if urlIs != url
        if (urlIs != url): 
            # activate checkbox 'remember_me'
            elem = driver.find_element_by_id('remember_me')
            if (elem.is_selected() == False):
                elem.click()
            assert elem.is_selected() == True
            elem = driver.find_element_by_id('email')
            elem.send_keys(email)
            elem = driver.find_element_by_id('password')
            elem.send_keys(password)
            elem.send_keys(Keys.RETURN)
            # Wait until login pages is replaced by real page
            urlIs = driver.current_url
            while (urlIs != url):
                time.sleep(1)
                urlIs = driver.current_url
            print (urlIs)

            # results = driver.find_elements_by_class_name('following')
            # results = driver.find_elements_by_tag_name('li')

            # print(results[0].text)
        assert (urlIs == url)

Unit Tests using Web Automation

import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

#from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options

import os
import time

class PythonOrgSearch(unittest.TestCase):
#    def __init__(self,asdf):
#        self.driver = webdriver.Firefox() 

    def setUp(self):
        print ("setUp")
        # headless mode:
        # opts = Options()
        # opts.set_headless()
        # assert opts.headless  # Operating in headless mode
        # self.driver = webdriver.Firefox(options=opts)

        self.driver = webdriver.Firefox()

    def test_search_in_python_org(self):
        driver = self.driver
        driver.get("http://www.python.org")
        self.assertIn("Python", driver.title)
        elem = driver.find_element_by_name("q")
        elem.send_keys("pycon")
        elem.send_keys(Keys.RETURN)
        assert "No results found." not in driver.page_source
        print ("fertig: python_org")

    def tearDown(self):
        print ("tearDown")
        print ("close Firefox")
        self.driver.close() # close tab
        self.driver.quit() # quit browser
        # os._exit(1) # exit unittest without Exception


if __name__ == "__main__":
    try:
        unittest.main()
    except SystemExit as e:
        os._exit(1)

Debugging

Print name of the current function, useful to place at every function

import sys
...
if self.verbose:
  print("=== " + sys._getframe().f_code.co_name + " ===")

pip install ipython
...
from IPython import embed  
...
embed()  # to drop into iPython Shell from within the code

Or use the editor PyCharm or Visual Studio Code to set breakpoints to drop into python debugger
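
Alternatively, since Python 3.7 the built-in breakpoint() drops into the debugger (pdb) right at that line:

def my_function(x):
    y = x * 2
    breakpoint()  # starts pdb here: inspect variables by name, continue with 'c'
    return y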

Profiling: time and number of calls per function

Var 1: via external calling

python -m cProfile myScript.py > 1.txt

Var 2: loading the module in the .py script

import cProfile
pr = cProfile.Profile()
pr.enable()
<... do some work ...>
pr.disable()
pr.print_stats()