#! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Complete solution for the CS101 Programming Project about matrices.
This file defines a class Matrix, to be extended as much as possible.
Examples
--------
Importing the module:
>>> from matrix import *
>>> from matrix import Matrix as M # shortcut
Defining a matrix by giving its list of rows:
>>> A = M([[1, 0], [0, 1]])
>>> A == eye(A.n)
True
>>> B = 2*(A**2) + 4*A + eye(A.n)
>>> B
[[7, 0], [0, 7]]
>>> B == 7 * eye(A.n)
True
Indexing and slicing:
>>> A[1,:] = 2; A
[[1, 0], [2, 2]]
>>> A[0, 0] = -5; A
[[-5, 0], [2, 2]]
Addition, multiplication, power etc:
>>> C = eye(2); C
[[1, 0], [0, 1]]
>>> C + (3 * C) - C
[[3, 0], [0, 3]]
>>> (4 * C) ** 2
[[16, 0], [0, 16]]
See the other file 'tests.py' for *many* examples.
- @date: Tue Apr 07 14:09:03 2015.
- @author: Lilian Besson for CS101 course at Mahindra Ecole Centrale 2015.
- @licence: MIT Licence (http://lbesson.mit-license.org).
"""
from __future__ import division
from math import factorial # For exp(A)
# Experimental: computing with fractions, to be exact rather than numerically approximate!
from decimal import Decimal as _Decimal
class Decimal(_Decimal):
    """ Extended decimal.Decimal class to improve the str and repr methods.
    If there is no digit after the decimal point, print it as an integer.
    (The original docstring wrongly referred to 'fractions.Decimal'.)
    """
    def __str__(self):
        # Integral value: drop the useless fractional part ("2.00" -> "2").
        # The unused *args/**kwargs of the previous version were dead code:
        # the str() protocol never passes extra arguments.
        if int(self) == self:
            return "{}".format(int(self))
        else:
            return _Decimal.__str__(self)
    __repr__ = __str__
from fractions import Fraction as _Fraction
class Fraction(_Fraction):
    """ Extended fractions.Fraction class with nicer str and repr methods.
    A fraction whose denominator is 1 is displayed as a plain integer.
    """
    def __str__(self, *args, **kwargs):
        num, den = self.numerator, self.denominator
        if den == 1:
            # Whole number: hide the "/1"
            return "{}".format(num)
        return "{}/{}".format(num, den)
    __repr__ = __str__
# ========================================================================
class Matrix(object):
    """ A class to represent matrices of size (n, m).
    M = Matrix(listrows) will have three attributes:
    - M.listrows: list of row vectors (each row is a plain list),
    - M.n or M.rows: number of rows,
    - M.m or M.cols: number of columns (ie. length of the rows).
    All the required special methods are implemented, so Matrix objects can be used (almost) like numbers.
    Warning: all the rows should have the same size.
    """
def __init__(self, listrows):
    """ Create a matrix object from the list of row vectors listrows.
    - listrows: a list of rows, each row being a list of coefficients.
    - Raises ValueError if listrows is not a list of same-length lists.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A.listrows
    [[1, 2, 3], [4, 5, 6]]
    """
    try:
        self._n = len(listrows)
        if self._n == 0:
            self._m = 0
        else:
            self._m = len(listrows[0])
        # Explicit check instead of 'assert' (assert vanishes under python -O)
        if not all(self._m == len(listrows[i]) for i in xrange(self._n)):
            raise ValueError("Matrix() rows should all have the same size.")
        # Element-wise fresh copy: the caller cannot mutate our state
        # through the list it handed us.
        #: self.listrows is the list of rows for self
        self.listrows = [[listrows[i][j] for j in xrange(self._m)] for i in xrange(self._n)]
        # FIXME We should forbid modifying these attributes from outside the class
        self._i0, self._j0 = 0, 0
    except ValueError:
        raise
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed and re-labelled as ValueError.
        raise ValueError("Matrix() accepts only a list of rows vectors (ie. list of lists) as its argument.")
# This decorator @property makes this method an attributes
# cf. https://docs.python.org/2/library/functions.html#property
@property
def n(self):
    """ Getter for the read-only attribute A.n (number of rows).
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A.n
    2
    >>> A.rows == A.n
    True
    """
    return self._n
# Alias so A.rows reads the same read-only attribute as A.n.
rows = n
# This decorator @property makes this method an attributes
# cf. https://docs.python.org/2/library/functions.html#property
@property
def m(self):
    """ Getter for the read-only attribute A.m (row length, ie. number of columns).
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A.m
    3
    >>> A.cols == A.m
    True
    """
    return self._m
# Alias so A.cols reads the same read-only attribute as A.m.
cols = m
# ... # keep working from that point
# ====================================================================
# Methods for reading and accessing
def __getitem__(self, key):
    """ A[i, j] <-> A.listrows[i][j] reads the (i, j) coefficient of the matrix A.
    - *Experimental* support of slices: A[a:b:k, j], A[i, c:d:l] or A[a:b:k, c:d:l] (which return sub-matrices).
    - Slice defaults: start 0, stop max size, step 1.
    - Raises ValueError when an index is neither an int nor a slice.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A[0, 0]
    1
    >>> A[-1, :]
    [[4, 5, 6]]
    >>> A[:, 0]
    [[1], [4]]
    >>> A[1:, 1:]
    [[5, 6]]
    >>> A[:, ::2]
    [[1, 3], [4, 6]]
    """
    # Unpack in the body: 'def __getitem__(self, (i, j))' is Python-2-only syntax.
    i, j = key
    if isinstance(i, int):
        if isinstance(j, int):
            return self.listrows[i][j]
        elif isinstance(j, slice):
            # i is an integer, j is a slice: one (partial) row
            return Matrix([self.listrows[i][j]])
    elif isinstance(i, slice):
        if isinstance(j, int):
            # i is a slice, j is an integer: one (partial) column
            return Matrix([[row[j]] for row in self.listrows[i]])
        elif isinstance(j, slice):
            # i and j are both slices: a sub-matrix
            return Matrix([row[j] for row in self.listrows[i]])
    # i or j is neither an integer nor a slice.
    # Bug fix: the message used to print 'type(i)' for both indices.
    raise ValueError("Matrix.__getitem__ invalid argument. A[i, j] with i = {} (type(i) is {}) and j = {} (type(j) is {}).".format(i, type(i), j, type(j)))
def __setitem__(self, key, value):
    """ A[i, j] = value updates the (i, j) coefficient of the matrix A.
    - *Experimental* slice support: A[a:b:k, j] = sub-row, A[i, c:d:l] = sub-column, A[a:b:k, c:d:l] = sub-matrix.
    - value may be a Matrix, a nested list, a flat list, or a scalar (broadcast to every targeted cell).
    - Bug fixes: (a) an index that is neither an int nor a slice used to
      fall through SILENTLY (no 'fail' flag was set in that case); it now
      raises ValueError; (b) in the slice/slice case the column cursor was
      never reset between rows, corrupting multi-row sub-matrix assignment.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A[0, 0] = 4; A
    [[4, 2, 3], [4, 5, 6]]
    >>> A[-1, :] = 9; A
    [[4, 2, 3], [9, 9, 9]]
    >>> A[0, :] = [3, 2, 1]; A
    [[3, 2, 1], [9, 9, 9]]
    >>> A[1:, 1:] = -1; A
    [[3, 2, 1], [9, -1, -1]]
    >>> A[1:, 1:] *= -8; A
    [[3, 2, 1], [9, 8, 8]]
    """
    # Unpack in the body: 'def __setitem__(self, (i, j), value)' is Python-2-only syntax.
    i, j = key
    if isinstance(value, Matrix):
        value = value.listrows  # work on the plain list of rows
    if isinstance(i, int) and isinstance(j, int):
        # Simple (and by far most frequent) case.
        self.listrows[i][j] = value
        return
    if isinstance(i, int) and isinstance(j, slice):
        # One row, several columns. value is tried as nested list,
        # then flat list, then scalar (broadcast).
        j_value = 0
        for j0 in xrange(*j.indices(self.cols)):
            try:
                self.listrows[i][j0] = value[0][j_value]  # sub-row
            except Exception:
                try:
                    self.listrows[i][j0] = value[j_value]  # flat list
                except Exception:
                    self.listrows[i][j0] = value  # scalar
            j_value += 1
        return
    if isinstance(i, slice) and isinstance(j, int):
        # Several rows, one column.
        i_value = 0
        for i0 in xrange(*i.indices(self.rows)):
            try:
                self.listrows[i0][j] = value[i_value][0]  # sub-column
            except Exception:
                try:
                    self.listrows[i0][j] = value[i_value]  # flat list
                except Exception:
                    self.listrows[i0][j] = value  # scalar
            i_value += 1
        return
    if isinstance(i, slice) and isinstance(j, slice):
        # Sub-matrix assignment.
        i_value = 0
        for i0 in xrange(*i.indices(self.rows)):
            j_value = 0  # bug fix: reset the column cursor for EACH row
            for j0 in xrange(*j.indices(self.cols)):
                try:
                    self.listrows[i0][j0] = value[i_value][j_value]  # sub-matrix
                except Exception:
                    try:
                        self.listrows[i0][j0] = value[i_value]  # flat list
                    except Exception:
                        self.listrows[i0][j0] = value  # scalar
                j_value += 1
            i_value += 1
        return
    # i or j is neither an integer nor a slice: always raise now.
    raise ValueError("Matrix.__setitem__ invalid argument. A[i, j] with i = {} (type(i) is {}) and j = {} (type(j) is {}).".format(i, type(i), j, type(j)))
# row and col to access a row or a column
def row(self, i):
    """ A.row(i) -> the i-th row of A, as a new 1-row matrix.
    - The result is independent: mutating it does NOT modify A.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A.row(0)
    [[1, 2, 3]]
    >>> r = A.row(0); r *= 3
    >>> A # unchanged
    [[1, 2, 3], [4, 5, 6]]
    """
    # A fresh list copy of that single row, wrapped as a (1, m) matrix.
    return Matrix([list(self.listrows[i])])
def col(self, j):
    """ A.col(j) -> the j-th column of A, as a new (n, 1) matrix.
    - The result is independent: mutating it does NOT modify A.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A.col(0)
    [[1], [4]]
    >>> c = A.col(1); c *= 6
    >>> A # unchanged
    [[1, 2, 3], [4, 5, 6]]
    """
    # Pick the j-th coefficient of every row.
    return Matrix([[row[j]] for row in self.listrows])
# ====================================================================
# Special method for copying (not required in the project)
#def __hash__(self):
# """ hash(A) <-> A.__hash__() computes the hash of the matrix (just depends on A.listrows).
#
# - Is required if we want to be able to insert a matrix in a set or dictionary.
# - FIXME: unhashable type: 'list'
# """
# return hash(self.listrows)
def copy(self):
    """ A.copy() -> an independent copy of the matrix A.
    - Note: the Matrix constructor rebuilds every row list element-wise,
      so (despite the earlier "shallow copy" wording) coefficient updates
      on the copy never leak back into the original, and vice versa.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> B = A.copy()
    >>> A[0, 0] = -10; A
    [[-10, 2, 3], [4, 5, 6]]
    >>> B # It has not been modified!
    [[1, 2, 3], [4, 5, 6]]
    """
    return Matrix(self.listrows)
# ====================================================================
# Length and shape
def __len__(self):
    """ len(A) is A.n * A.m, the total number of coefficients.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> len(A) == A.n * A.m == 6
    True
    """
    nb_rows, nb_cols = self.shape
    return nb_rows * nb_cols
# This decorator @property makes this method an attributes
# cf. https://docs.python.org/2/library/functions.html#property
@property
def shape(self):
    """ A.shape is the pair (A.n, A.m), like the shape attribute of NumPy arrays.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A.shape
    (2, 3)
    """
    return (self.n, self.m)
# ====================================================================
# Transposition
def transpose(self):
    """ A.transpose() -> the transpose of the matrix A, as a new matrix.
    - Definition: if B = A.transpose(), then B[i, j] is A[j, i].
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A.transpose()
    [[1, 4], [2, 5], [3, 6]]
    >>> A.transpose().transpose() == A
    True
    """
    # zip(*rows) yields the columns of A, ie. the rows of A^T.
    return Matrix([list(column) for column in zip(*self.listrows)])
@property
def T(self):
    """ A.T <-> A.transpose(): NumPy-style shorthand for the transpose of A.
    >>> B = Matrix([[1, 4], [2, 5], [3, 6]])
    >>> B.T
    [[1, 2, 3], [4, 5, 6]]
    >>> B == B.T.T
    True
    """
    return self.transpose()
# ====================================================================
# Methods for pretty-printing
def __str__(self):
    """ str(A) <-> A.__str__() converts the matrix A to a string (the list of row vectors, without quotes).
    >>> B = Matrix([[1, 4], [2, 5], [3, 6]])
    >>> str(B)
    '[[1, 4], [2, 5], [3, 6]]'
    """
    try:
        # Stringify every coefficient first so custom __str__ (Fraction,
        # Decimal) are honoured, then strip the quotes.
        return str(self.map(str).listrows).replace("'", "")
    except Exception:
        # Bug fix: this fallback used to compute str(self.listrows) but
        # never RETURNED it, so __str__ returned None and str(A) raised
        # a TypeError instead of falling back gracefully.
        return str(self.listrows)
def __repr__(self):
    """ repr(A) <-> A.__repr__(): same readable list-of-rows form as str(A).
    >>> B = Matrix([[1, 4], [2, 5], [3, 6]])
    >>> repr(B)
    '[[1, 4], [2, 5], [3, 6]]'
    """
    return self.__str__()
# Comparing
def __eq__(self, B):
    """ A == B <-> A.__eq__(B) compares the matrix A with B, coefficient by coefficient.
    - Time complexity is O(n * m) for matrices of size (n, m).
    - Anything that is not a compatible matrix compares unequal (no exception leaks out).
    >>> B = Matrix([[1, 4], [2, 5], [3, 6]])
    >>> B == B
    True
    >>> B + B + B == 3*B == B + 2*B == 2*B + B
    True
    >>> B != B
    False
    """
    try:
        if self.n == B.n and self.m == B.m:
            return all(self[i, j] == B[i, j] for j in xrange(self.m) for i in xrange(self.n))
        else:
            return False
    except Exception:
        # Different shape, missing attributes, incomparable coefficients...
        return False

def __ne__(self, B):
    """ A != B <-> not (A == B).
    - Bug fix: under Python 2, defining __eq__ alone does NOT provide
      __ne__, so 'A != B' silently fell back to identity comparison and
      two equal-but-distinct matrices compared as different.
    """
    return not self.__eq__(B)
def almosteq(self, B, epsilon=1e-10):
    """ A.almosteq(B) compares A with B numerically, within an absolute tolerance epsilon.
    - Default epsilon is $10^{-10}$.
    - Time complexity is O(n * m) for matrices of size (n, m).
    >>> B = Matrix([[1, 4], [2, 5], [3, 6]])
    >>> C = B.copy(); C[0,0] += 4*1e-6
    >>> B == C
    False
    >>> B.almosteq(C, epsilon=1e-4)
    True
    >>> B.almosteq(C, epsilon=1e-6)
    False
    """
    try:
        if (self.n, self.m) != (B.n, B.m):
            return False
        return all(abs(self[r, c] - B[r, c]) < epsilon for c in xrange(self.m) for r in xrange(self.n))
    except Exception:
        # Not a matrix, or coefficients that cannot be subtracted.
        return False
# ====================================================================
# Methods for computing
# Sum (left and right)
def __add__(self, B):
    """ A + B <-> A.__add__(B): sum of the matrix A and B, as a new matrix.
    - Time and memory complexity is O(n * m) for matrices of size (n, m).
    - If B is a number, it is added to every coefficient.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A + A
    [[2, 4, 6], [8, 10, 12]]
    >>> A + 1 # Coefficient wise!
    [[2, 3, 4], [5, 6, 7]]
    """
    if not isinstance(B, Matrix):
        # Scalar: coefficient-wise sum.
        return Matrix([[x + B for x in row] for row in self.listrows])
    # Matrix + Matrix: shapes must agree.
    assert self.n == B.n and self.m == B.m
    return Matrix([[x + y for x, y in zip(row_a, row_b)]
                   for row_a, row_b in zip(self.listrows, B.listrows)])
def __radd__(self, B):
    """ B + A <-> A.__radd__(B): sum with B on the left, as a new matrix.
    - If B is a number, it is added to every coefficient.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> 1 + A
    [[2, 3, 4], [5, 6, 7]]
    """
    if not isinstance(B, Matrix):
        # Scalar on the left: coefficient-wise sum.
        return Matrix([[B + x for x in row] for row in self.listrows])
    # Matrix on the left (in practice B + A dispatches to B.__add__ first).
    assert self.n == B.n and self.m == B.m
    return Matrix([[y + x for x, y in zip(row_a, row_b)]
                   for row_a, row_b in zip(self.listrows, B.listrows)])
# ====================================================================
# Substraction (left and right)
def __sub__(self, B):
    """ A - B <-> A.__sub__(B): difference of the matrix A and B, as a new matrix.
    - If B is a number, it is subtracted from every coefficient.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> A - 1 # Coefficient wise!
    [[0, 1, 2], [3, 4, 5]]
    """
    if not isinstance(B, Matrix):
        # Scalar: coefficient-wise difference.
        return Matrix([[x - B for x in row] for row in self.listrows])
    # Matrix - Matrix: shapes must agree.
    assert self.n == B.n and self.m == B.m
    return Matrix([[x - y for x, y in zip(row_a, row_b)]
                   for row_a, row_b in zip(self.listrows, B.listrows)])
def __neg__(self):
    """ -A <-> A.__neg__(): the opposite of the matrix A, as a new matrix.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> -A
    [[-1, -2, -3], [-4, -5, -6]]
    >>> -(-A) == A
    True
    """
    return Matrix([[-x for x in row] for row in self.listrows])
def __pos__(self):
    """ +A <-> A.__pos__(): applies unary plus to every coefficient (a fresh copy in practice).
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> +A == A
    True
    """
    return Matrix([[+x for x in row] for row in self.listrows])
def __rsub__(self, B):
    """ B - A <-> A.__rsub__(B): difference with B on the left, as a new matrix.
    - If B is a number, the difference is computed coefficient-wise.
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> 1 - A # Coefficient wise!
    [[0, -1, -2], [-3, -4, -5]]
    """
    if not isinstance(B, Matrix):
        # Scalar on the left: B - a_{i,j} for every coefficient.
        return Matrix([[B - x for x in row] for row in self.listrows])
    # Matrix on the left (in practice B - A dispatches to B.__sub__ first).
    assert self.n == B.n and self.m == B.m
    return Matrix([[y - x for x, y in zip(row_a, row_b)]
                   for row_a, row_b in zip(self.listrows, B.listrows)])
# ====================================================================
# Product (left and right)
def __mul__(self, B):
    """ A * B <-> A.__mul__(B): matrix product of A and B, as a new matrix.
    - Time complexity is O(n * m * p) for A of size (n, m) and B of size (m, p).
    - If B is a number, every coefficient is multiplied by B.
    >>> A = Matrix([[1, 2], [3, 4]])
    >>> A * A
    [[7, 10], [15, 22]]
    >>> A * 1 == A # Coefficient wise!
    True
    """
    if not isinstance(B, Matrix):
        # Scalar: coefficient-wise product.
        return Matrix([[x * B for x in row] for row in self.listrows])
    # Matrix product: inner dimensions must agree.
    assert self.m == B.n
    columns_of_B = list(zip(*B.listrows))
    return Matrix([[sum(a * b for a, b in zip(row, column)) for column in columns_of_B]
                   for row in self.listrows])
def __rmul__(self, B):
    """ B * A <-> A.__rmul__(B): product with B on the left, as a new matrix.
    - If B is a number, every coefficient is multiplied by B.
    >>> A = Matrix([[1, 2], [3, 4]])
    >>> 1 * A == A # Coefficient wise!
    True
    """
    if not isinstance(B, Matrix):
        # Scalar on the left: coefficient-wise product.
        return Matrix([[B * x for x in row] for row in self.listrows])
    # Matrix on the left (in practice B * A dispatches to B.__mul__ first).
    assert self.n == B.m
    columns_of_self = list(zip(*self.listrows))
    return Matrix([[sum(b * a for b, a in zip(row_b, column)) for column in columns_of_self]
                   for row_b in B.listrows])
def multiply_elementwise(self, B):
    """ A.multiply_elementwise(B): coefficient-wise (Hadamard) product of A and B, as a new matrix.
    >>> A = Matrix([[1, 2], [3, 4]])
    >>> A.multiply_elementwise(A) # A .^ 2 in Matlab?
    [[1, 4], [9, 16]]
    """
    if not isinstance(B, Matrix):
        raise ValueError("A.multiply_elementwise(B): B has to be a Matrix object.")
    # Shapes must match exactly for a Hadamard product.
    assert self.shape == B.shape
    return Matrix([[x * y for x, y in zip(row_a, row_b)]
                   for row_a, row_b in zip(self.listrows, B.listrows)])
# ====================================================================
# Division (left and right)
def __div__(self, B):
    """ A / B <-> A * (B ** (-1)) computes the division of the matrix A by B.
    - Returns a new matrix!
    - Performs true division!
    - Time and memory complexity is O(n * m * p * max(m,p)**2) for a matrix A of size (n, m) and B of size (m, p).
    - If B is a number, the division is done coefficient wise.
    These examples are failing in a simple Python console, but work fine in an IPython console...
    **But I do not know why!**
    """
    # Deliberately kept OUT of the docstring (dead string): these
    # doctests fail in a plain Python console, see the note above.
    """
    >>> A = Matrix([[1, 2], [3, 4]])
    >>> B = eye(A.n)
    >>> B.almosteq(A / A)
    True
    >>> C = B.map(float)
    >>> A / C == A * C == A
    True
    >>> A / B == A * B == A
    True
    >>> A / 2 # Coefficient wise! FIXME?
    [[0, 1], [1, 2]]
    >>> A / 2.0 # Coefficient wise!
    [[0.5, 1.0], [1.5, 2.0]]
    """
    # Bug fix: removed the leftover debug 'print' (its own FIXME said
    # "remove when debug is done") that spammed stdout on every division.
    if isinstance(B, Matrix):
        # 'Division' by a matrix: multiply by its inverse.
        return self * (B.inv())
    else:
        # Division of matrix A by a number B (coefficient-wise)
        return Matrix([[self[i, j] / B for j in xrange(self.m)] for i in xrange(self.n)])
def __floordiv__(self, B):
    """ A // B <-> A * (B ** (-1)) computes the division of the matrix A by B.
    - Returns a new matrix!
    - Time and memory complexity is O(n * m * p) for a matrix A of size (n, m) and B of size (m, p).
    - If B is a number, the division is done coefficient wise with an **integer (floor) division**.
    >>> A = Matrix([[1, 2], [3, 4]])
    >>> B = eye(A.n); C = B.map(float)
    >>> A // C == A * C == A
    True
    >>> A // B == A * B == A
    True
    >>> A // 2 # Coefficient wise!
    [[0, 1], [1, 2]]
    >>> A // 2.0 # Coefficient wise!
    [[0.0, 1.0], [1.0, 2.0]]
    """
    if isinstance(B, Matrix):
        # 'Division' by a matrix: multiply by its inverse (NOT floored).
        return self * (B.inv())
    else:
        # Division of matrix A and a number B (coefficients wise)
        return Matrix([[self[i, j] // B for j in xrange(self.m)] for i in xrange(self.n)])
def __mod__(self, b):
    """ A % b <-> A.__mod__(b): coefficient-wise modulus of the matrix A by b, as a new matrix.
    >>> A = Matrix([[1, 2], [3, 4]])
    >>> A % 2
    [[1, 0], [1, 0]]
    >>> (A*100) % 33 == A # Curious property
    True
    """
    return Matrix([[x % b for x in row] for row in self.listrows])
def __rdiv__(self, B):
    """ B / A <-> A.__rdiv__(B) computes the division of B by A.
    - If B is 1 (B == 1), 1 / A is A.inv() (special case!)
    - If B is a number, the division is done coefficient wise.
    - Returns a new matrix!
    - Time and memory complexity is O(n * m * p) for a matrix A of size (n, m) and B of size (m, p).
    These examples are failing in a simple Python console, but work fine in an IPython console...
    **But I do not know why!**
    """
    # Deliberately kept OUT of the docstring (dead string): these
    # doctests fail in a plain Python console, see the note above.
    """
    >>> A = Matrix([[1, 2], [3, 4]])
    >>> B = eye(A.n)
    >>> 1 / B == B == B / 1
    True
    >>> A.inv() == 1 / A # special case!
    True
    >>> 2 / (2*A)
    [[2.0, 1.0], [0.666666666667, 0.5]]
    """
    # Bug fix: removed the leftover debug 'print' (its own FIXME said
    # "remove when debug is done") that spammed stdout on every division.
    if B == 1:
        # Special case: 1 / A is the inverse matrix.
        return self.inv()
    elif isinstance(B, Matrix):
        return B * (self.inv())
    else:
        # Division of a number B by matrix A (coefficient-wise)
        return Matrix([[B / self[i, j] for j in xrange(self.m)] for i in xrange(self.n)])
# ====================================================================
# Power, exponential and inverse
def __pow__(self, k):
    """ A ** k <-> A.__pow__(k): k-th power of the square matrix A, by fast (binary) exponentiation.
    - Returns a new matrix!
    - k has to be an integer (ValueError is raised otherwise).
    - Time complexity is O(n^3 log(k)) for a matrix A of size (n, n); memory is O(n^2).
    - Uses A.inv() to (try to) compute the inverse when k < 0.
    >>> A = Matrix([[1, 2], [3, 4]])
    >>> A ** 1 == A
    True
    >>> A ** 2
    [[7, 10], [15, 22]]
    >>> A * A == A ** 2
    True
    >>> B = eye(A.n)
    >>> B == B ** 1 == A ** 0 == B ** 0
    True
    >>> A ** 2015 == ((A ** 1007) ** 2 ) * A
    True
    It also accepts negative integers:
    >>> A ** (-1) == A.inv()
    True
    >>> (A ** (-5)).round() == ((A ** 5).inv()).round() == ((A.inv()) ** 5).round() # No rounding mistake!
    True
    """
    if not isinstance(k, int):
        raise ValueError("A ** k: k should be an integer (here k = {}).".format(k))
    if k < 0:
        # A ** k = (A ** (-1)) ** (-k)
        return (self.inv()) ** (abs(k))
    elif k == 0:
        return eye(self.n)  # Convention: A ** 0 = identity matrix
    elif k == 1:
        return self.copy()  # A fresh copy, so the caller may mutate it safely
    # Binary exponentiation; the previous version ended with an
    # unreachable 'else: raise ValueError(...)' branch (k % 2 is always
    # 0 or 1 once k is known to be an integer >= 2) — removed.
    P = self * self
    if k % 2 == 0:
        return P ** int(k // 2)  # A^(2q) = (A^2)^q
    else:
        return self * (P ** int((k - 1) // 2))  # A^(2q+1) = A * (A^2)^q
def exp(self, limit=100):
    r""" A.exp() computes a numerical approximation of the exponential of the square matrix A.
    - Raises ValueError if A is not square, or if limit is not positive.
    - Note: $\exp(A) = \mathrm{e}^A$ is defined as the series $\sum_{k=0}^{+\infty} \frac{A^k}{k!}$.
    - We only compute the first `limit` terms of this series, hoping that the partial sum is close to the entire series.
    - Default value for `limit` is 100 (it should be enough for any matrix).
    >>> from math import e
    >>> I = eye(10)
    >>> (I * e).round() == I.exp().round() == diag([e] * I.n).round() # No rounding mistakes!
    True
    """
    # Explicit check instead of 'assert limit > 0': assert is input
    # validation here and would vanish under python -O.
    if limit <= 0:
        raise ValueError("A.exp(limit): limit should be a positive integer (here limit = {}).".format(limit))
    if not self.is_square:
        raise ValueError("A.exp() is only possible if A is a square matrix.")
    # Partial sum of the series, starting from the k = 0 term (identity).
    result = eye(self.n)
    for k in xrange(1, limit):
        result += (self ** k) * (1.0 / factorial(k))
    return result
def inv(self):
    """ A.inv() computes the inverse of the square matrix A (if possible), with the Gauss-Jordan algorithm.
    - Raise a ValueError exception if A is not square.
    - Raise a ValueError exception if A is singular.
    >>> A = Matrix([[1, 2], [3, 4]])
    >>> A.inv()
    [[-2.0, 1.0], [1.5, -0.5]]
    >>> Ai = A.inv().round() # No more rounding mistake!
    >>> A * Ai == Ai * A == eye(A.n)
    True
    >>> O = Matrix([[1, 2], [0, 0]])
    >>> O.inv() # O is singular!
    Traceback (most recent call last):
    ...
    ValueError: A.inv() on a singular matrix (ie. non inversible).
    """
    # Guard clause first: inversion only makes sense for square matrices.
    if not self.is_square:
        raise ValueError("A.inv() is only possible if A is a square matrix.")
    try:
        reduced_form, inverse = self.gauss_jordan(inv=True)
    except Exception:
        # Any failure of the elimination means the matrix is singular.
        raise ValueError("A.inv() on a singular matrix (ie. non inversible).")
    return inverse
# ====================================================================
# Gauss elimination process (to get a row echelon form)
def gauss(self, det=False, verb=False, mode=None, maxpivot=False):
    """ A.gauss() implements the Gauss elimination process on matrix A.
    When possible, the Gauss elimination process produces a row echelon form by applying linear operations to A.
    - If maxpivot is true, we look for the pivot with higher absolute value (can help reducing rounding mistakes).
    - If verb is True, some details are printed at each steps of the algorithm.
    - mode can be None (default), or 'f' for fractions or 'd' for decimal numbers.
    - Reference is https://en.wikipedia.org/wiki/Gaussian_elimination#Definitions_and_example_of_algorithm
    - We chosed to apply rows operations only: it uses elementary operations on lines/rows: L_i' <-- L_i - gamma * L_k (method swap_rows).
    - Can swap two columns in order to select the bigger pivot (increases the numerical stability).
    - The function will raise a ValueError if the matrix a is singular (ie. Gauss process cannot conclude).
    - If det is true, the returned value is c, d with c the row echelon form, and d the determinant. Reference for this part is https://en.wikipedia.org/wiki/Gaussian_elimination#Computing_determinants.
    >>> Matrix([[1, 2], [3, 4]]).gauss()
    [[1, 2], [0, -2]]
    >>> Matrix([[1, 2], [1, 2]]).gauss()
    [[1, 2], [0, 0]]
    >>> Matrix([[1, 2], [-1, -0.5]]).gauss()
    [[1, 2], [0, 1.5]]
    >>> Matrix([[1, 2], [3, 4]]).gauss(maxpivot=True)
    [[2, 1], [0, 1]]
    >>> Matrix([[1, 2], [1, 2]]).gauss(maxpivot=True)
    [[2, 1], [0, 0]]
    >>> Matrix([[1, 2], [3, 4]]).gauss(det=True)
    ([[1, 2], [0, -2]], -2)
    >>> Matrix([[1, 2], [1, 2]]).gauss(det=True)
    ([[1, 2], [0, 0]], 0)
    """
    # We start with a fresh copy of self (self itself is never modified).
    c = self.copy()
    n, m = c.n, c.m
    currentdet = 1
    if mode == None:
        # Trying to compute the mode ourself, from the coefficient types.
        if all(isinstance(x, Fraction) for x in self):
            mode = 'f'
        elif all(isinstance(x, Decimal) for x in self):
            mode = 'd'
    elif mode == 'f':
        try:
            currentdet = Fraction(1)
            c = self.map(Fraction)
        except Exception as e:
            print "Failed to convert to Fraction:", e
            c = self.copy()
    elif mode == 'd':
        try:
            currentdet = Decimal(1)
            c = self.map(Decimal)
        except Exception as e:
            print "Failed to convert to Decimal:", e
            c = self.copy()
    for k in xrange(min(n, m)):
        if verb:
            print "\nTrying to find the {}-th pivot:".format(k)
            print "With these indeces:", range(k, m)
            print "And that array:", [abs(c[k, j]) for j in xrange(m)]
        if maxpivot:
            # Pick the pivot with largest absolute value (numerical stability).
            i_max = _argmax(range(k, m), [abs(c[k, j]) for j in xrange(m)])
        else:
            i_max = k
        if c[k, i_max] == 0:
            # Fallback: take the first non-zero coefficient of the row, if any.
            for possible_i_max in xrange(k, m):
                if c[k, possible_i_max] != 0:
                    i_max = possible_i_max
                    break
            # We found the first i_max such that c[k, i_max] is not zero, or k if none are good
        if verb:
            # assert c[k, i_max] == max(abs(c[k, j]) for j in xrange(m))
            print "_argmax has given i_max = {}, and c[k, i_max] = {} (with k = {}).".format(i_max, c[k, i_max], k)
        if c[k, i_max] == 0:
            currentdet = 0
            # FIXME Do we really have a singular matrix already ?
            # raise ValueError("A.gauss_elimination() called on a singular matrix.")
            if (not det) and verb:
                print "WARNING: A.gauss() might have been called on a singular matrix. FIXME remove these warnings"
            # return (c, 0) if det else c # FIXME not yet !
            # determinant is 0, that is sure at least
        else:
            if verb:
                print "c.col(i_max) is:", c.col(i_max)
                print "c.col(k) is:", c.col(k)
                print "i_max = {}, k = {}.".format(i_max, k)
            # c.swap_rows(i_max, k)
            c.swap_cols(i_max, k) # FIXME Shouldn't we swap rows instead? I think not
            # Swapping two rows multiplies the determinant by -1
            if i_max != k:
                if verb:
                    print "We swapped two different lines ({} and {}), the determinant will be multiplied by -1.".format(i_max, k)
                currentdet *= -1
        if k >= (min(n, m) - 1):
            if verb:
                print "For the last line, we swapped the {i_max}-th and {k}-th rows, but nothing else.".format(i_max=i_max, k=k)
            break # break the for loop RIGHT NOW
        if verb:
            print "Gauss Elimination: using the {k}th line (L_{k} = {l}).\n We use {pivot} as a pivot.".format(k=k, l=c.row(k), pivot=c[k, k])
        # Do for all lines below pivot:
        for i in xrange(k+1, n):
            # gamma is the elimination coefficient, computed in the
            # arithmetic selected by 'mode' (exact for 'f'/'d').
            if mode == 'f':
                gamma = Fraction(c[i, k], c[k, k])
            elif mode == 'd':
                gamma = Decimal(c[i, k]) / Decimal(c[k, k])
            else:
                # gamma = float(c[i, k]) / float(c[k, k])
                gamma = c[i, k] / c[k, k]
            if verb:
                print " Operation L_{i}' <-- L_{i} - gamma * L_{k}".format(i=i, k=k)
                print " with gamma =", gamma
                print " with old L_{i} = {l}".format(i=i, l=c.row(i))
            # Do for all remaining elements in current line:
            for j in xrange(k+1, m):
                c[i, j] -= gamma * c[k, j]
                # We convert to integer if possible, it is prettier :)
                # FIXME isn't it a cause of rounding mistake?
                if int(c[i, j]) == c[i, j]:
                    c[i, j] = int(c[i, j])
            # Fill lower triangular matrix with zeros (because gamma is chosen like that):
            c[i, k] = 0
            if verb:
                print " with new L_{i}' = {l}".format(i=i, l=c.row(i))
    if det:
        # Product of the (-1)**(nb of swaps) * diagonal elements
        currentdet *= _prod(c[i, i] for i in xrange(min(n, m)))
        return c, currentdet
    else:
        return c
    # End of gauss()
# ====================================================================
# Gauss-Jordan elimination process (to get a reduced row echelon form)
[docs] def gauss_jordan(self, inv=False, verb=False, mode=None, maxpivot=False):
""" A.gauss_jordan() implements the Gauss elimination process on matrix A.
- If inv is true, the returned value is J_n, A**(-1) with J_n the reduced row echelon form of A, and A**(-1) the computed inverse of A.
- If maxpivot is true, we look for the pivot with higher absolute value (can help reducing rounding mistakes).
"""
# We start with a fresh copy of self.
c = self.copy()
if mode == None:
# Trying to compute the mode ourself
if all(isinstance(x, Fraction) for x in self):
mode = 'f'
elif all(isinstance(x, Decimal) for x in self):
mode = 'd'
elif mode == 'f':
try:
c = self.map(Fraction)
except Exception as e:
print "Failed to convert to Fraction:", e
c = self.copy()
elif mode == 'd':
try:
c = self.map(Decimal)
except Exception as e:
print "Failed to convert to Decimal:", e
c = self.copy()
if inv:
if c.n != c.m:
raise ValueError("A.gauss_jordan(inv=True) is only possible if A is a square matrix.")
cinv = eye(c.n)
# print "OK, trying to produce the inverse of c also."
# print "At first, c is:", c
# print "Experimental construction of the augmented matrix of size (n, 2m) (with n = {}, m = {}).".format(c.n, c.m)
# c = mat_from_f(lambda i, j: c[i, j] if j < c.m else int((i+c.m)==j), c.n, 2 * c.m)
# # Reference is https://en.wikipedia.org/wiki/Augmented_matrix
# print "Now, c is:", c
n, m = c.n, c.m
r = -1
for j in xrange(m):
if verb:
print "\nj =", j, "=> current c is:", c
if inv:
print "INFO: Current cinv is:", cinv
print "Looking for the pivot with r = {r}, j = {j}.".format(r=r+1, j=j)
print "Indeces:", range(r+1, n)
print "Values:", [abs(c[i, j]) for i in xrange(n)]
# k = _argmax(range(r+1, n), [abs(c[i, j]) for i in xrange(n)])
if maxpivot:
k = _argmax(range(r+1, n), [abs(c[i, j]) for i in xrange(n)])
else:
k = r+1
if c[k, j] == 0:
for possible_k in xrange(r+1, n):
if c[k, j] != 0:
k = k
break
# We found the first i_max such that c[k, i_max] is not zero, or k if none are good
if verb:
print "For r = {}, c[k, j] is the pivot (k = {}, j = {}), equals to {}.".format(r+1, k, j, c[k, j])
if inv and c[k, j] == 0:
raise ValueError("A.gauss_jordan() called on a singular matrix.")
if c[k, j] != 0:
pivot = c[k, j]
r += 1
if verb:
print "Pivot is not zero ({}), so we divide the k-th (k = {}) row by the pivot.".format(pivot, k)
# c[k, :] /= pivot # Divising the k-th row by the pivot
for jjjj in xrange(m):
# c[k, jjjj] /= pivot
if mode == 'f':
c[k, jjjj] = Fraction(c[k, jjjj], pivot)
elif mode == 'd':
c[k, jjjj] = Decimal(c[k, jjjj] / pivot)
else:
c[k, jjjj] /= pivot
if inv:
if verb:
print "INFO: Same linear operation is applied to cinv: cinv[k, :] /= pivot"
# cinv[k, :] /= pivot
for jjjj in xrange(m):
# cinv[k, jjjj] /= pivot
if mode == 'f':
cinv[k, jjjj] = Fraction(cinv[k, jjjj], pivot)
elif mode == 'd':
cinv[k, jjjj] = Decimal(cinv[k, jjjj] / pivot)
else:
cinv[k, jjjj] /= pivot
if k != r:
if verb:
print "We swap the rows r = {} and k = {}.".format(r, k)
print "Before: R_k =", c[k, :], "R_r =", c[r, :]
c.swap_rows(k, r) # Swap the rows r and k
# c.swap_cols(k, r) # Swap the columns r and k
if inv:
if verb:
print "INFO: Same linear operation is applied to cinv: cinv.swap_rows(k, r)"
cinv.swap_rows(k, r) # Swap the rows r and k
# cinv.swap_cols(k, r) # Swap the columns r and k
if verb:
print "After: R_k =", c[k, :], "R_r =", c[r, :]
for i in xrange(n):
if verb:
print "For i = {}.".format(i)
if i != r and c[i, j] != 0:
cij = c[i, j]
if verb:
print "Before: R_{i} =", c[i, :]
print "R_{i} <-- R_{i} - c[{i}, {j}] * R_{r}. c[{i}, {j}] is {cij}".format(i=i, j=j, r=r, cij=cij)
c[i, :] -= c[r, :] * cij
if inv:
if verb:
print "INFO: Same linear operation is applied to cinv: cinv[i, :] -= cinv[r, :] * c[i, j]"
cinv[i, :] -= cinv[r, :] * cij
# R_i <-- R_i - c[i,j] * R_r
if verb:
print "After: R_{i} =", c[i, :]
else:
if verb:
print "No modification here (i = r or c[i, j] = 0)."
# Done
if verb:
print "Done ! c =", c
# c = c.map(lambda x: int(x) if int(x) == x else x) # Pretty priting only
if inv:
if verb:
print "Done ! cinv =", cinv
return c, cinv
else:
return c
# ====================================================================
# Applications of the Gauss elimination process
@property
def rank(self):
    """ A.rank uses the Gauss elimination process to compute the rank of the matrix A, by counting the number of non-zero elements on the diagonal of the echelon form.
    - Note: the old `verb` parameter was removed, as arguments can never be passed to a property getter anyway (it was dead code).
    - FIXME: the Gauss process has to be changed, and improved for singular matrices (when the rank is not maximum!).
    """
    c = self.gauss(verb=False, det=False)
    return sum(c[i, i] != 0 for i in range(min(self.n, self.m)))
@property
def det(self):
    """ A.det uses the Gauss elimination process (O(n^3)) to compute the determinant of the matrix A.
    - Note: the old `verb` parameter was removed, as arguments can never be passed to a property getter anyway (it was dead code).
    - The result is converted to int when it is an exact integer (prettier).
    """
    _, d = self.gauss(det=True, verb=False)
    if int(d) == d:
        d = int(d)
    return d
# ====================================================================
# Extra methods (not required in the project)
def count(self, value):
    """ A.count(value) counts how many times the value is in the matrix."""
    occurrences = 0
    for col in range(self.m):
        for row in range(self.n):
            if self[row, col] == value:
                occurrences += 1
    return occurrences
def __contains__(self, value):
    """ value in A <-> A.__contains__(value) tells if the value is present in the matrix."""
    for col in range(self.m):
        for row in range(self.n):
            if self[row, col] == value:
                return True
    return False
def map(self, f, *args, **kwargs):
    """ Apply the function f to each of the coefficient of the matrix A (returns a new matrix).
    - Extra positional and keyword arguments are forwarded to every call of f."""
    new_rows = []
    for i in range(self.n):
        new_rows.append([f(self[i, j], *args, **kwargs) for j in range(self.m)])
    return Matrix(new_rows)
def round(self, ndigits=8):
    """ A.round([ndigits=8]) -> rounds every coefficient to ndigits digits after the comma."""
    return self.map(lambda x: round(x, ndigits))
# ====================================================================
# Iterating over a matrix
def __iter__(self):
    """ iter(A) <-> A.__iter__() is used to create an iterator from the matrix.
    - The values are looped row by row: A[0, 0], A[0, 1], ..., A[0, m-1], A[1, 0], ...
    - This method is called when an iterator is required for a container; it returns a fresh generator over all the coefficients.
    """
    for i in range(self.n):
        for j in range(self.m):
            yield self[i, j]
# next and __next__ for iterating over the values of our matrix
def __next__(self):
    """ Python 3 compatibility: next(A) <-> A.__next__() <-> A.next()."""
    return self.next()
def next(self):
    """ Stateful iteration over the coefficients of A, driven by the cursors self._i0 (row) and self._j0 (column).
    - The values are looped row by row, matching __iter__ and the docstring.
      (FIX: it used to advance the *row* cursor first, i.e. iterate column by column,
      contradicting both its own docstring and __iter__.)
    - Raises StopIteration once every coefficient has been produced.
    """
    if (self._i0 < self.n) and (self._j0 < self.m):
        v = self[self._i0, self._j0]
        if self._j0 < self.m - 1:
            self._j0 += 1
        else:
            # End of the current row: restart at the first column of the next row
            self._j0 = 0
            self._i0 += 1
        return v
    else:
        raise StopIteration()
# ====================================================================
# To deal nicely with matrices of complex numbers
@property
def real(self):
    """ Real part of the matrix, coefficient wise (non-complex coefficients are left unchanged)."""
    def _re(z):
        return z.real if isinstance(z, complex) else z
    return self.map(_re)
@property
def imag(self):
    """ Imaginary part of the matrix, coefficient wise (non-complex coefficients are left unchanged)."""
    def _im(z):
        return z.imag if isinstance(z, complex) else z
    return self.map(_im)
def conjugate(self):
    """ Complex conjugate of the matrix, coefficient wise (non-complex coefficients are left unchanged)."""
    def _conj(z):
        return z.conjugate() if isinstance(z, complex) else z
    return self.map(_conj)
# ====================================================================
# Dot product and norm
def dot(self, v):
    """ A.dot(v) <-> A . v computes the dot multiplication of the vector v.
    - v can be a matrix of size (m, 1), or any iterable of size m (converted to a column vector first).
    """
    if not isinstance(v, Matrix):
        # Convert the iterator v to a list, then to a column vector, and retry
        try:
            column = Matrix([[coord] for coord in list(v)])
        except Exception:
            raise ValueError("A.dot(v): impossible to convert v = {} to a column vector.".format(v))
        return self.dot(column)
    if self.m != v.n or v.m != 1:
        raise ValueError("A.dot(v): the size of the vector should be compatible with the size of the matrix. Here self.m = {} and v.n = {}, are different.".format(self.m, v.n))
    return self * v
def norm(self, p=2):
    """ A.norm() computes the p-norm of the matrix A, default is p = 2.
    - Mathematically defined as the p-th root of the sum of the p-th powers of the *modulus* of its coefficients.
    - The result is returned as an int when it is an exact integer (prettier).
    - Reference is https://en.wikipedia.org/wiki/Matrix_norm#.22Entrywise.22_norms.
    """
    total = 0
    for coeff in self:
        total += abs(coeff) ** p
    result = total ** (1.0 / p)
    truncated = int(result)
    return truncated if truncated == result else result
def normalized(self, fnorm=None):
    """ A.normalized() returns a new matrix, whose column vectors are normalized by using the norm 2 (or the given function fnorm).
    - Will not fail if a column has norm 0 (it is simply left unchanged).
    - fnorm, if given, is called on each column (an (n, 1) matrix) and must return a scalar.
    - Reference is https://en.wikipedia.org/wiki/Orthogonalization.
    """
    if fnorm is None:  # 'is None' instead of '== None': identity test, robust to odd __eq__
        fnorm = lambda x: x.norm()
    c = self.copy()
    for j in xrange(c.cols):
        # Compute the norm once per column (it used to be evaluated twice)
        norm_j = fnorm(c[:, j])
        if norm_j != 0:
            c[:, j] = c[:, j] / norm_j
    return c
def __abs__(self):
    """ abs(A) <-> A.__abs__() computes the absolute value / modulus coefficient-wise."""
    return self.map(abs)
# ====================================================================
# Trace and other special values
def trace(self):
    """ A.trace() computes the trace of A (sum of the diagonal coefficients, also defined for rectangular matrices)."""
    total = 0
    for i in range(min(self.n, self.m)):
        total += self[i, i]
    return total
# TODO: Try to find an algorithm to approximatively compute eigen values, and eigen vectors ?
# ====================================================================
# Check if a matrix is square, symetric, anti-symetric, diagonal, skew-symetric (hermitian) etc
@property
def is_square(self):
    """ A.is_square tests if A is square or not."""
    return self.n == self.m
@property
def is_symetric(self):
    """ A.is_symetric tests if A is symetric or not (A[i, j] == A[j, i] for every i, j; requires A to be square)."""
    return self.n == self.m and all(self[i, j] == self[j, i] for i in xrange(self.n) for j in xrange(self.m))
@property
def is_anti_symetric(self):
    """ A.is_anti_symetric tests if A is anti-symetric or not (A[i, j] == -A[j, i] for every i, j; requires A to be square)."""
    return self.n == self.m and all(self[i, j] == -self[j, i] for i in xrange(self.n) for j in xrange(self.m))
@property
def is_diagonal(self):
    """ A.is_diagonal tests if A is diagonal or not (every off-diagonal coefficient is zero)."""
    return all(self[i, j] == 0 for i in xrange(self.n) for j in xrange(self.m) if i != j)
@property
def is_hermitian(self):
    """ A.is_hermitian tests if A is hermitian or not (A[i, j] == conjugate(A[j, i]); non-complex coefficients are compared directly)."""
    f = lambda z: z.conjugate() if isinstance(z, complex) else z
    return self.n == self.m and all(self[i, j] == f(self[j, i]) for i in xrange(self.n) for j in xrange(self.m))
@property
def is_lower(self):
    """ A.is_lower tests if A is lower triangular or not (every coefficient strictly above the diagonal is zero)."""
    return all(self[i, j] == 0 for i in xrange(self.n) for j in xrange(i+1, self.m))
@property
def is_upper(self):
    """ A.is_upper tests if A is upper triangular or not (every coefficient strictly below the diagonal is zero)."""
    return all(self[i, j] == 0 for i in xrange(1, self.n) for j in xrange(i))
@property
def is_zero(self):
    """ A.is_zero tests if A is the zero matrix or not."""
    return all(self[i, j] == 0 for j in xrange(self.m) for i in xrange(self.n))
@property
def is_singular(self):
    """ A.is_singular tests if A is singular (ie. non-invertible) or not.
    - Computes the determinant by using the Gauss elimination process. """
    return self.det == 0
# ====================================================================
# Linear operations *in place*
def swap_cols(self, j1, j2):
    """ A.swap_cols(j1, j2) exchanges *in place* the j1-th and j2-th *columns* of the matrix A."""
    for row in range(self.rows):
        temp = self[row, j1]
        self[row, j1] = self[row, j2]
        self[row, j2] = temp
def swap_rows(self, i1, i2):
    """ A.swap_rows(i1, i2) exchanges *in place* the i1-th and i2-th *rows* of the matrix A."""
    for col in range(self.cols):
        temp = self[i1, col]
        self[i1, col] = self[i2, col]
        self[i2, col] = temp
# ========================================================================
# Adjugate matrix (https://en.wikipedia.org/wiki/Adjugate_matrix)
def minor(self, i, j):
    """ A.minor(i, j) <-> minor(A, i, j) returns the (i, j) minor of A: the determinant of the submatrix obtained by deleting row i and column j.
    - Complexities: memory is O(n^2), time is O(n^3) (1 determinant of size n-1)."""
    submatrix = [[self[r, c] for c in range(self.m) if c != j]
                 for r in range(self.n) if r != i]
    return det(Matrix(submatrix))
def cofactor(self, i, j):
    """ A.cofactor(i, j) <-> cofactor(A, i, j) returns the (i, j) cofactor of A: (-1)**(i+j) times the (i, j) minor of A.
    - Complexities: memory is O(n^2), time is O(n^3) (1 determinant of size n-1)."""
    sign = -1 if (i + j) % 2 else 1
    return sign * self.minor(i, j)
def adjugate(self):
    """ A.adjugate() <-> adjugate(A) returns the adjugate matrix of A (the matrix of cofactors).
    - Reference is https://en.wikipedia.org/wiki/Adjugate_matrix#Inverses.
    - Complexities: memory is O(n^2), time is O(n^5) (n^2 determinants of size n-1).
    - Using the adjugate matrix for computing the inverse is a BAD method: too time-consuming! LU or Gauss-elimination is only O(n^3).
    """
    return mat_from_f(self.cofactor, self.n)
def type(self):
    """ A.type() returns the matrix of types of coefficients of A. """
    return self.map(type)
# End of that class Matrix
# ========================================================================
# ========================================================================
# Utility functions
def ones(n, m=None):
    """ ones(n, m) is a matrix of size (n, m) filled with 1.
    - m defaults to n (square matrix).
    - FIXED: the test used to be 'if not m', which silently turned an explicit m=0
      (an (n, 0) matrix) into an (n, n) matrix; only None triggers the default now.
    """
    if m is None:
        m = n
    return Matrix([[1 for _ in xrange(m)] for _ in xrange(n)])
def zeros(n, m=None):
    """ zeros(n, m) is a matrix of size (n, m) filled with 0.
    - m defaults to n (square matrix); same 'm is None' fix as ones().
    """
    if m is None:
        m = n
    return Matrix([[0 for _ in xrange(m)] for _ in xrange(n)])
def eye(n):
    """ eye(n) is the identity matrix of size (n, n) (1 on the diagonal, 0 outside). """
    return Matrix([[int(i == j) for j in range(n)] for i in range(n)])
def diag(d):
    """ diag(d) creates a square matrix whose diagonal is the list of values d (0 outside the diagonal). """
    size = len(d)
    rows = []
    for i in range(size):
        row = [0] * size
        row[i] = d[i]
        rows.append(row)
    return Matrix(rows)
def mat_from_f(f, n, m=None):
    """ mat_from_f(f, n, m=None) creates a matrix of size (n, m) initialized with the function f : A[i, j] = f(i, j).
    - Default value for m is n (square matrix).
    - FIXED: the test used to be 'if not m', which silently turned an explicit m=0
      (an (n, 0) matrix) into an (n, n) matrix; only None triggers the default now.
    - WARNING: f has to accept two arguments i, j.
    - Remark: similar to Array.make (or Array.init) in OCaml (v3.12+) or String.create (or String.make).
    """
    if m is None:
        m = n
    return Matrix([[f(i, j) for j in xrange(m)] for i in xrange(n)])
# ========================================================================
# Functions that are just wrappers around methods
def det(A):
    """ Module-level wrapper: det(A) <-> A.det, the determinant of A (computed in O(n^3))."""
    return A.det
def rank(A):
    """ Module-level wrapper: rank(A) <-> A.rank, the rank of A (computed in O(n^3))."""
    return A.rank
def gauss(A, *args, **kwargs):
    """ gauss(A) <-> A.gauss() applies the Gauss elimination process on A (in O(n^3)).
    - FIXED: this wrapper used to call A.gauss_elimination(...), but the method is
      named Matrix.gauss (cf. A.rank and A.det, which both call self.gauss).
    """
    return A.gauss(*args, **kwargs)
def gauss_jordan(A, *args, **kwargs):
    """ Module-level wrapper: gauss_jordan(A) <-> A.gauss_jordan(), the Gauss-Jordan elimination process on A (in O(n^3))."""
    return A.gauss_jordan(*args, **kwargs)
def inv(A):
    """ Module-level wrapper: inv(A) <-> A.inv(), the inverse of A (in O(n^3), if A is invertible)."""
    return A.inv()
def exp(A, *args, **kwargs):
    """ Module-level wrapper: exp(A) <-> A.exp(), an approximation of the exponential of A (in O(n^3 * limit))."""
    return A.exp(*args, **kwargs)
# ========================================================================
# The LU decomposition
def PLUdecomposition(A, mode=None):
    """ matrix.PLUdecomposition(A) computes the permuted LU decomposition for the matrix A.
    - Operates in time complexity of O(n^3), memory of O(n^2).
    - mode can be None (default), or 'f' for fractions or 'd' for decimal numbers.
    - Returned P, L, U that satisfy P*A = L*U, with P being a permutation matrix, L a lower triangular matrix, U an upper triangular matrix.
    - Will raise a ValueError exception if A is singular.
    - Reference is https://en.wikipedia.org/wiki/Gaussian_elimination#Definitions_and_example_of_algorithm
    - We chosed to apply rows operations only: it uses elementary operations on lines/rows: L_i' <-- L_i - gamma * L_k (method swap_rows).
    - Can swap two columns in order to select the bigger pivot (increases the numerical stability).
    """
    assert A.is_square
    # U starts as a copy of A and is progressively reduced to upper triangular form.
    U = A.copy()
    n, m = U.n, U.m
    # P accumulates the row permutations, L the elimination coefficients (the gammas).
    P, L = eye(n), eye(n)
    if mode == None:
        # Trying to compute the mode ourself: keep exact arithmetic if A already uses it
        if all(isinstance(x, Fraction) for x in U):
            mode = 'f'
        elif all(isinstance(x, Decimal) for x in U):
            mode = 'd'
    elif mode == 'f':
        try:
            U = U.map(Fraction)
            L = L.map(Fraction)
        except Exception as e:
            # NOTE(review): if L.map raised after U.map succeeded, U stays converted
            # while L does not -- TODO confirm this mixed state is acceptable.
            print "Failed to convert to Fraction:", e
            U = U.copy()
    elif mode == 'd':
        try:
            U = U.map(Decimal)
            L = L.map(Decimal)
        except Exception as e:
            print "Failed to convert to Decimal:", e
            U = U.copy()
    # Now we can start
    for k in xrange(n):
        # Find the pivot on the k-th row
        i_max = k
        # Is the pivot U[k, k] is zero, we find a possible better pivot
        # NOTE(review): this fallback scans along *row* k (U[k, j] over the columns j),
        # yet i_max is then used as a *row* index in swap_rows(i_max, k). This mixes row
        # and column indexing and looks dubious whenever U[k, k] == 0 -- TODO confirm on
        # a matrix with a zero leading pivot (when U[k, k] != 0, i_max == k and the swaps
        # below are no-ops, so the common case is unaffected).
        if U[k, i_max] == 0:
            i_max = _argmax(range(k, m), [abs(U[k, j]) for j in xrange(m)])
        # Is the pivot U[k, i_max] is still non zero, we cannot do anything, because the matrix is singular.
        if U[k, i_max] == 0:
            raise ValueError("PLUdecomposition() has been called on a singular matrix.")
        else:
            # U.swap_cols(i_max, k)
            U.swap_rows(i_max, k)
            # The matrix P will keep track of the permutations performed during the Gaussian Elimination process:
            # P.swap_cols(i_max, k)
            P.swap_rows(i_max, k)
        if k >= (min(n, m) - 1):
            break  # break the for loop RIGHT NOW: no rows are left below the last pivot
        # Do for all lines/rows below pivot:
        for i in xrange(k+1, n):
            # gamma is the multiplier that cancels U[i, k]
            if mode == 'f':
                gamma = Fraction(U[i, k], U[k, k])
            elif mode == 'd':
                gamma = Decimal(U[i, k]) / Decimal(U[k, k])
            else:
                # gamma = float(U[i, k]) / float(U[k, k])
                gamma = U[i, k] / U[k, k]
            # Do for all remaining elements in current line:
            for j in xrange(k+1, m):
                # Add - gamma times row k to row i of U
                U[i, j] -= gamma * U[k, j]
            # Fill lower triangular matrix with zeros (because gamma is chosen like that):
            if mode == 'f':
                U[i, k] = Fraction(0)
            elif mode == 'd':
                U[i, k] = Decimal(0)
            else:
                U[i, k] = 0
            # The entries of L below the diagonal are gradually replaced by the negatives of multiples used in the corresponding row operations of type #1.
            L[i, k] = gamma
        # Moreover, any pair of entries that both lie below the diagonal
        # in these same two rows (i_max and k) of L must also be interchanged,
        # while entries lying on and above its diagonal need to stay in their place.
        # NOTE(review): the range starts at min(i_max, k)+1, i.e. *on or above* the
        # diagonal of these rows, which does not match the comment above -- TODO
        # confirm P*A == L*U on a matrix that actually needs a row swap.
        for j in xrange(min(i_max, k)+1, m):
            L[i_max, j], L[k, j] = L[k, j], L[i_max, j]
            # L[j, i_max], L[j, k] = L[j, k], L[j, i_max]
    return P, L, U
# End of PLUdecomposition()
# ========================================================================
# Other functions
def norm(A, p=2, *args, **kwargs):
    """ Module-level wrapper: norm(A, p) <-> A.norm(p), the p-norm of A (default is p = 2)."""
    return A.norm(p, *args, **kwargs)
def trace(A, *args, **kwargs):
    """ Module-level wrapper: trace(A) <-> A.trace(), the trace of A."""
    return A.trace(*args, **kwargs)
# ========================================================================
# Random matrix
from random import randint as _randint, uniform as _uniform
def rand_matrix(n=1, m=1, k=10):
    """ rand_matrix(n, m, k) generates a new random matrix of size (n, m), each coefficient being an integer randomly taken between -k and k."""
    assert k > 0
    random_rows = [[_randint(-k, k) for _ in xrange(m)] for _ in xrange(n)]
    return Matrix(random_rows)
def rand_matrix_float(n=1, m=1, k=10):
    """ rand_matrix_float(n, m, k) generates a new random matrix of size (n, m), each coefficient being a float number randomly taken between -k and k."""
    assert k > 0
    random_rows = [[_uniform(-k, k) for _ in xrange(m)] for _ in xrange(n)]
    return Matrix(random_rows)
# del _randint # Useful or not ?
# ========================================================================
# 2 auxiliary functions used by the Gauss elimination process
def _argmax(indexes, array):
    """ Compute the index i in indexes such that array[i] is the biggest (the last maximum wins on ties, as before).
    - Raises ValueError if indexes is empty.
    - FIXED: the comparison used to be 'array[i] >= bestvalue' with bestvalue = None,
      which relied on Python 2's arbitrary ordering of None (and is a TypeError on
      Python 3); the None sentinel is now tested explicitly, with identical behavior.
    """
    besti, bestvalue = None, None
    for i in indexes:
        # WARNING: Accessing array[i] does not check if i is a good index or not
        if bestvalue is None or array[i] >= bestvalue:
            besti, bestvalue = i, array[i]
    if besti is None:
        raise ValueError("argmax() arg is a non-valid sequence.")
    return besti
def _prod(iterator):
    """ Compute the product of the values in the iterator.
    Empty product is 1."""
    p = 1
    for x in iterator:
        p *= x
    return p
# 2 auxiliary functions for implementing the generalized __setitem__ method
def _ifnone(a, b):
    """ Return b if a is None else a.
    - Useful for converting a slice object to a xrange object."""
    return b if a is None else a
def _slice_to_xrange(sliceobject):
    """ Get a xrange of indeces from a slice object.
    - NOTE(review): sliceobject.stop is used as-is, so a slice with stop=None
      (e.g. A[1:]) would make xrange() fail here; presumably the callers always
      supply an explicit stop -- TODO confirm in __getitem__/__setitem__.
    - Thanks to http://stackoverflow.com/a/13855369 !"""
    return xrange(_ifnone(sliceobject.start, 0), sliceobject.stop, _ifnone(sliceobject.step, 1))
# ========================================================================
# The Gram-Schmidt orthogonalization process
def innerproduct(vx, vy):
    """ Dot product <vx, vy> of the two vectors vx and vy (real numbers ONLY).
    - The two vectors must have the same length."""
    assert len(vx) == len(vy)
    return sum(x * y for x, y in zip(vx, vy))
def norm_square(u):
    """ Shortcut for the square of the norm of the vector u : ||u||^2 = <u, u>."""
    return innerproduct(u, u)
def vect_const_multi(vx, c):
    """ The vector c * vx, for a (real) constant c."""
    return [c * x for x in vx]
def proj(u, v):
    """ Projection of the vector v onto the vector u (proj_u(v) as written on Wikipedia).
    - Returns the zero vector when u is the zero vector."""
    uu = norm_square(u)
    if uu == 0:
        return [0] * len(u)
    return vect_const_multi(u, innerproduct(u, v) / uu)
def gram_schmidt(V, normalized=False):
    """ Basic implementation of the Gram-Schmidt process, in the easy case of Rn with the usual <.,.>.
    - The family of vectors is stored in the *rows* of the matrix (NOTE(review): the
      original docstring said *column* vectors, but the code orthogonalizes the rows
      U[k, :]; the row behavior is kept -- confirm against callers).
    - Reference for notations, concept and proof is https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process.
    - If normalized is True, the vectors are normalized before being returned.
    - FIXED an off-by-one: the projections onto *all* the previous vectors j < k must
      be subtracted, but the loop used to be xrange(0, k-1) and skipped j = k-1, so
      u_k was never made orthogonal to u_{k-1}.
    """
    n, m = V.n, V.m
    U = V.copy()
    for k in xrange(1, n):
        # u_k <-- v_k - sum_{j < k} proj_{u_j}(v_k)
        for j in xrange(k):
            p = proj(U[j, :], U[k, :])
            for t in xrange(m):
                U[k, t] -= p[t]
        # Now u_k is orthogonal to all the previous u_j (j < k)!
    if normalized:
        return U.normalized()
    else:
        return U
# ========================================================================
# Adjugate matrix (https://en.wikipedia.org/wiki/Adjugate_matrix)
def minor(A, i, j):
    """ Module-level wrapper: minor(A, i, j) <-> A.minor(i, j), the determinant of A with row i and column j deleted.
    - Complexities: memory is O(n^2), time is O(n^3) (1 determinant of size n-1)."""
    return A.minor(i, j)
def cofactor(A, i, j):
    """ Module-level wrapper: cofactor(A, i, j) <-> A.cofactor(i, j), i.e. (-1)**(i+j) times the (i, j) minor of A.
    - Complexities: memory is O(n^2), time is O(n^3) (1 determinant of size n-1)."""
    return A.cofactor(i, j)
def adjugate(A):
    """ Module-level wrapper: adjugate(A) <-> A.adjugate(), the adjugate matrix of A.
    - Reference is https://en.wikipedia.org/wiki/Adjugate_matrix#Inverses.
    - Complexities: memory is O(n^2), time is O(n^5) (n^2 determinants of size n-1).
    - Using the adjugate matrix for computing the inverse is a BAD method: too time-consuming! LU or Gauss-elimination is only O(n^3).
    """
    return A.adjugate()
# ========================================================================
# TODO Solver for linear equation A.x = b
# Cramer's formula ? Gauss elimination is better.
# Use the pseudo-inverse ? https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_pseudoinverse#The_iterative_method_of_Ben-Israel_and_Cohen
# [http://mp.cpgedupuydelome.fr/cours.php?id=3805&idPartie=34487]
# ========================================================================
# We are done
if __name__ == '__main__':
print "You can run the file 'tests.py' to see examples of use of this module 'matrix.py'."
print "Testing every doctests in the module 'matrix'..."
# Each function or method I wrote includes a doctest:
import doctest
doctest.testmod()
print "\nMore details about doctest can be found on the Python documentation: \nhttps://docs.python.org/2/library/doctest.html"
A = Matrix([[1, 2], [3, 4]])