def find_root_bisect(function, x0, x1, eps=1e-5, max_iter=10000, debug=False):
    """Find a root of function in [x0, x1] by bisection."""
    assert x0 < x1
    assert function(x0) < 0, "need monotonically increasing function + suitable x values"
    assert function(x1) > 0, "need monotonically increasing function + suitable x values"

    iter_count = 0

    while x1 - x0 > eps and iter_count < max_iter:
        x_mid = (x0 + x1) / 2
        f_mid = function(x_mid)
        if debug:
            print("{:4d} x_mid={:+.5e}  f(x_mid)={:+.5e}".format(iter_count, x_mid, f_mid))
        if f_mid < 0:
            x0 = x_mid   # root lies in the right half
        else:
            x1 = x_mid   # root lies in the left half

        iter_count += 1

    if x1 - x0 > eps:   # iteration failed
        return None

    return (x0 + x1) / 2.0
        
    
def x_squared_minus_2(x):
    return x * x - 2


print(find_root_bisect(x_squared_minus_2, 0, 2, debug=True))
   0 x_mid=+1.00000e+00  f(x_mid)=-1.00000e+00
   1 x_mid=+1.50000e+00  f(x_mid)=+2.50000e-01
   2 x_mid=+1.25000e+00  f(x_mid)=-4.37500e-01
   3 x_mid=+1.37500e+00  f(x_mid)=-1.09375e-01
   4 x_mid=+1.43750e+00  f(x_mid)=+6.64062e-02
   5 x_mid=+1.40625e+00  f(x_mid)=-2.24609e-02
   6 x_mid=+1.42188e+00  f(x_mid)=+2.17285e-02
   7 x_mid=+1.41406e+00  f(x_mid)=-4.27246e-04
   8 x_mid=+1.41797e+00  f(x_mid)=+1.06354e-02
   9 x_mid=+1.41602e+00  f(x_mid)=+5.10025e-03
  10 x_mid=+1.41504e+00  f(x_mid)=+2.33555e-03
  11 x_mid=+1.41455e+00  f(x_mid)=+9.53913e-04
  12 x_mid=+1.41431e+00  f(x_mid)=+2.63274e-04
  13 x_mid=+1.41418e+00  f(x_mid)=-8.20011e-05
  14 x_mid=+1.41425e+00  f(x_mid)=+9.06326e-05
  15 x_mid=+1.41422e+00  f(x_mid)=+4.31482e-06
  16 x_mid=+1.41420e+00  f(x_mid)=-3.88434e-05
  17 x_mid=+1.41421e+00  f(x_mid)=-1.72643e-05
1.4142112731933594
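
A small aside (our own sketch, not part of the material above): the asserts require a sign change from negative to positive, f(x0) < 0 < f(x1). For a monotonically decreasing function we can simply pass its negation, which has the same root:

def two_minus_x_squared(x):
    return 2 - x * x   # decreasing on [0, 2], root at sqrt(2)


def negated(x):
    return -two_minus_x_squared(x)   # increasing again, same root


print(find_root_bisect(negated, 0, 2))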

Python also knows "anonymous functions", so-called "lambda" functions. They are intended to be passed around and are restricted compared to regular functions: the body must be a single expression, so statements such as "for" or "if" are not allowed.

The syntax is lambda <ARGS>: <RETURN_VALUE>:

print(find_root_bisect(lambda x: x**2 - 2, 0, 2))
1.4142112731933594
print(lambda a, b: a + b)
<function <lambda> at 0x1056d5c80>
print((lambda a, b: a + b)(1, 2))
3
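
Lambdas are handy wherever a short function is passed as an argument, for example as a sort key (a small illustrative sketch):

print(sorted(["bb", "a", "ccc"], key=lambda word: len(word)))   # -> ['a', 'bb', 'ccc']

As a second root-finding method we use Newton iterations, which refine a single starting value via

$$ x_{n+1} = x_n - \frac{f(x_n)}{f'(x_n)} $$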
def find_root_newton(function, derivative, x0, eps=1e-5, max_iter=10, debug=False):
    """Find a root of function by Newton iterations starting at x0."""
    iter_count = 0
    f0 = function(x0)

    while abs(f0) > eps and iter_count < max_iter:
        if debug:
            print("{:4d} x0={:+.5e} f(x0)={:+.5e}".format(iter_count, x0, f0))
        x0 = x0 - f0 / derivative(x0)   # Newton step
        iter_count += 1
        f0 = function(x0)

    if abs(f0) > eps:   # iteration failed, f0 is already up to date
        return None

    return x0
        
    
def x_squared_minus_2(x):
    return x * x - 2


def two_x(x):
    return 2 * x


print(find_root_newton(x_squared_minus_2, two_x, 10, debug=True))
   0 x0=+1.00000e+01 f(x0)=+9.80000e+01
   1 x0=+5.10000e+00 f(x0)=+2.40100e+01
   2 x0=+2.74608e+00 f(x0)=+5.54095e+00
   3 x0=+1.73719e+00 f(x0)=+1.01785e+00
   4 x0=+1.44424e+00 f(x0)=+8.58237e-02
   5 x0=+1.41453e+00 f(x0)=+8.82829e-04
1.4142135968022693
print(find_root_newton(lambda x: x*x - 2, lambda x: 2*x, 10, debug=True))
   0 x0=+1.00000e+01 f(x0)=+9.80000e+01
   1 x0=+5.10000e+00 f(x0)=+2.40100e+01
   2 x0=+2.74608e+00 f(x0)=+5.54095e+00
   3 x0=+1.73719e+00 f(x0)=+1.01785e+00
   4 x0=+1.44424e+00 f(x0)=+8.58237e-02
   5 x0=+1.41453e+00 f(x0)=+8.82829e-04
1.4142135968022693

Comparison:

Bisection:

  • slow (many iterations)
  • two suitable starting values required
  • derivative not necessary

Newton iterations:

  • fast (fewer iterations)
  • only one starting value required
  • derivative required (though a numerical approximation can stand in, as sketched below)
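
If no analytic derivative is at hand, Newton's method can still be used with a finite-difference approximation. A minimal sketch, assuming a central difference with step size h (the helper numerical_derivative is our own addition, not part of the material above):

def numerical_derivative(function, h=1e-7):
    # hypothetical helper: approximate f'(x) by a central difference
    def derivative(x):
        return (function(x + h) - function(x - h)) / (2 * h)
    return derivative


print(find_root_newton(x_squared_minus_2, numerical_derivative(x_squared_minus_2), 10))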
Next we approximate π with a Monte Carlo method: we draw uniform random points in the unit square; the fraction that lands inside the quarter circle of radius 1 approximates its area, π/4.

import random
import math


def montecarlo_pi(n_points):
    hits = 0
    for i in range(n_points):
        x = random.random()
        y = random.random()
        if math.hypot(x, y) <= 1:   # point lies inside the quarter circle
            hits += 1
    return hits / n_points * 4   # hits / n_points approximates pi / 4


for n_points in (10, 100, 1000, 10000, 100000, 1000000):
    pi_approx = montecarlo_pi(n_points)
    err = abs(math.pi - pi_approx)
    print("{:7d} {:.8f}  {:.5e}".format(n_points, pi_approx, err))
     10 3.20000000  5.84073e-02
    100 2.84000000  3.01593e-01
   1000 3.19200000  5.04073e-02
  10000 3.15240000  1.08073e-02
 100000 3.14512000  3.52735e-03
1000000 3.14271600  1.12335e-03
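
A deterministic alternative is the Leibniz series

$$ \frac{\pi}{4} = \sum_{k=0}^{\infty} \frac{(-1)^k}{2 k + 1} = 1 - \frac{1}{3} + \frac{1}{5} - \dots $$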
def leibnitz_pi(n_summands):
    sum_ = 0.0
    sign = 1
    for i in range(n_summands):
        sum_ += sign / (2 * i + 1)   # terms +1, -1/3, +1/5, ...
        sign = -sign
    return sum_ * 4


for n_summands in (10, 100, 1000, 10000, 100000):
    pi_approx = leibnitz_pi(n_summands)
    err = abs(math.pi - pi_approx)
    print("{:6d} {:.8f}  {:.5e}".format(n_summands, pi_approx, err))
    10 3.04183962  9.97530e-02
   100 3.13159290  9.99975e-03
  1000 3.14059265  1.00000e-03
 10000 3.14149265  1.00000e-04
100000 3.14158265  1.00000e-05
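
For an alternating series with decreasing terms the truncation error is bounded by the first omitted term, here $\frac{4}{2n + 1} \approx \frac{2}{n}$; the error column indeed shrinks like $\frac{1}{n}$.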

Comparison:

  • Monte Carlo converges slowly (error ~ $\frac{1}{\sqrt{n}}$)
  • the Leibniz series converges faster (error ~ $\frac{1}{n}$)

An additional, much faster approach (not part of the required material):

We use $$ \sum_{k=1}^{n} \frac{1}{k^4} \to \frac{\pi^4}{90} \quad (n \to \infty), $$ i.e. the value $\zeta(4)$ of the Riemann zeta function.

def leibnitz_bessel(n_summands):
    sum_ = 0.0
    for i in range(1, n_summands):
        sum_ += 1 / (i ** 4)   # partial sum of zeta(4)
    return (90 * sum_) ** .25   # invert pi**4 / 90


for n_summands in (10, 100, 1000, 10000, 100000):
    pi_approx = leibnitz_bessel(n_summands)
    err = abs(math.pi - pi_approx)
    print("{:6d} {:.8f}  {:.5e}".format(n_summands, pi_approx, err))
    10 3.14131204  2.80614e-04
   100 3.14159241  2.45539e-07
  1000 3.14159265  2.42248e-10
 10000 3.14159265  2.01172e-13
100000 3.14159265  2.01172e-13
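
Here the error falls like $\frac{1}{n^3}$: each tenfold increase of n gains about three digits, until the resolution of double precision is reached and the last two rows agree.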