Navigation

Source code for sympy.matrices.matrices

from __future__ import print_function, division

import collections

from types import FunctionType

from sympy.core.add import Add
from sympy.core.basic import Basic, Atom
from sympy.core.compatibility import (
    is_sequence, default_sort_key, range, NotIterable,
    reduce, as_int, string_types)
from sympy.core.expr import Expr
from sympy.core.function import count_ops
from sympy.core.logic import fuzzy_and
from sympy.core.numbers import Integer, ilcm, Float
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.symbol import Symbol, Dummy, symbols
from sympy.core.sympify import sympify
from sympy.functions import exp, factorial
from sympy.functions.elementary.miscellaneous import sqrt, Max, Min
from sympy.polys import PurePoly, roots, cancel, gcd
from sympy.printing import sstr
from sympy.simplify import simplify as _simplify, signsimp, nsimplify
from sympy.utilities.iterables import flatten, numbered_symbols


def _iszero(x):
    """Returns True if x is zero."""
    return x.is_zero

def __array__(self):
    """Return a numpy ndarray view of this matrix (numpy interop hook)."""
    from .dense import matrix2numpy
    return matrix2numpy(self)

def __len__(self):
    """Return the number of elements of self.

    Implemented mainly so bool(Matrix()) == False.
    """
    return self.rows * self.cols

# NOTE(review): the original excerpt ends here with a dangling ``@property``
# decorator whose decorated definition is outside this chunk; it is omitted.

def key2bounds(self, keys):
    """Converts a key with potentially mixed types of keys (integer and slice)
    into a tuple of ranges and raises an error if any index is out of self's
    range.

    See Also
    ========

    key2ij
    """
    # keys is a 2-tuple (row key, column key); each may be an int or a slice
    islice, jslice = [isinstance(k, slice) for k in keys]
    if islice:
        if not self.rows:
            rlo = rhi = 0
        else:
            rlo, rhi = keys[0].indices(self.rows)[:2]
    else:
        rlo = a2idx(keys[0], self.rows)
        rhi = rlo + 1
    if jslice:
        if not self.cols:
            clo = chi = 0
        else:
            clo, chi = keys[1].indices(self.cols)[:2]
    else:
        clo = a2idx(keys[1], self.cols)
        chi = clo + 1
    return rlo, rhi, clo, chi

def key2ij(self, key):
    """Converts key into canonical form, converting integers or indexable
    items into valid integers for self's range or returning slices
    unchanged.

    See Also
    ========

    key2bounds
    """
    if is_sequence(key):
        if not len(key) == 2:
            raise TypeError('key must be a sequence of length 2')
        return [a2idx(i, n) if not isinstance(i, slice) else i
                for i, n in zip(key, self.shape)]
    elif isinstance(key, slice):
        return key.indices(len(self))[:2]
    else:
        # a single flat index is converted to a (row, col) pair
        return divmod(a2idx(key, len(self)), self.cols)

def evalf(self, prec=None, **options):
    """Apply evalf() to each element of self."""
    return self.applyfunc(lambda i: i.evalf(prec, **options))

def LUsolve(self, rhs, iszerofunc=_iszero):
    """Solve the linear system Ax = rhs for x where A = self.

    This is for symbolic matrices, for real or complex ones use
    mpmath.lu_solve or mpmath.qr_solve.

    See Also
    ========

    lower_triangular_solve
    upper_triangular_solve
    gauss_jordan_solve
    cholesky_solve
    diagonal_solve
    LDLsolve
    QRsolve
    pinv_solve
    LUdecomposition
    """
    if rhs.rows != self.rows:
        raise ShapeError(
            "`self` and `rhs` must have the same number of rows.")

    # Fix: forward the caller-supplied iszerofunc; the original always
    # passed the default _iszero, silently ignoring the parameter.
    A, perm = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
    n = self.rows
    b = rhs.permuteFwd(perm).as_mutable()
    # forward substitution, all diag entries are scaled to 1
    for i in range(n):
        for j in range(i):
            scale = A[i, j]
            b.zip_row_op(i, j, lambda x, y: x - y*scale)
    # backward substitution
    for i in range(n - 1, -1, -1):
        for j in range(i + 1, n):
            scale = A[i, j]
            b.zip_row_op(i, j, lambda x, y: x - y*scale)
        scale = A[i, i]
        b.row_op(i, lambda x, _: x/scale)
    return rhs.__class__(b)

def LUdecomposition_Simple(self, iszerofunc=_iszero):
    """Returns A comprised of L, U (L's diag entries are 1) and
    p which is the list of the row swaps (in order).

    See Also
    ========

    LUdecomposition
    LUdecompositionFF
    LUsolve
    """
    if not self.is_square:
        raise NonSquareMatrixError(
            "A Matrix must be square to apply LUdecomposition_Simple().")
    n = self.rows
    A = self.as_mutable()
    p = []
    # factorization
    for j in range(n):
        for i in range(j):
            for k in range(i):
                A[i, j] = A[i, j] - A[i, k]*A[k, j]
        pivot = -1
        for i in range(j, n):
            for k in range(j):
                A[i, j] = A[i, j] - A[i, k]*A[k, j]
            # find the first non-zero pivot, includes any expression
            if pivot == -1 and not iszerofunc(A[i, j]):
                pivot = i
        if pivot < 0:
            # this result is based on iszerofunc's analysis of the possible
            # pivots, so even though the element may not be strictly zero,
            # the supplied iszerofunc's evaluation gave True
            raise ValueError("No nonzero pivot found; inversion failed.")
        if pivot != j:  # row must be swapped
            A.row_swap(pivot, j)
            p.append([pivot, j])
        scale = 1 / A[j, j]
        for i in range(j + 1, n):
            A[i, j] = A[i, j]*scale
    return A, p

def LUdecompositionFF(self):
    """Compute a fraction-free LU decomposition.

    Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
    If the elements of the matrix belong to some integral domain I, then all
    elements of L, D and U are guaranteed to belong to I.

    **Reference**
        - W. Zhou & D.J. Jeffrey, "Fraction-free matrix factors: new forms
          for LU and QR factors". Frontiers in Computer Science in China,
          Vol 2, no. 1, pp. 67-80, 2008.

    See Also
    ========

    LUdecomposition
    LUdecomposition_Simple
    LUsolve
    """
    from sympy.matrices import SparseMatrix
    zeros = SparseMatrix.zeros
    eye = SparseMatrix.eye

    n, m = self.rows, self.cols
    U, L, P = self.as_mutable(), eye(n), eye(n)
    DD = zeros(n, n)
    oldpivot = 1

    for k in range(n - 1):
        if U[k, k] == 0:
            # pivot is zero: find a row below with a nonzero entry to swap in
            for kpivot in range(k + 1, n):
                if U[kpivot, k]:
                    break
            else:
                raise ValueError("Matrix is not full rank")
            U[k, k:], U[kpivot, k:] = U[kpivot, k:], U[k, k:]
            L[k, :k], L[kpivot, :k] = L[kpivot, :k], L[k, :k]
            P[k, :], P[kpivot, :] = P[kpivot, :], P[k, :]
        L[k, k] = Ukk = U[k, k]
        DD[k, k] = oldpivot*Ukk
        for i in range(k + 1, n):
            L[i, k] = Uik = U[i, k]
            for j in range(k + 1, m):
                # fraction-free update: division by oldpivot is exact
                U[i, j] = (Ukk*U[i, j] - U[k, j]*Uik) / oldpivot
            U[i, k] = 0
        oldpivot = Ukk
    DD[n - 1, n - 1] = oldpivot
    return P, L, DD, U

def cofactorMatrix(self, method="berkowitz"):
    """Return a matrix containing the cofactor of each element.

    See Also
    ========

    cofactor
    minorEntry
    minorMatrix
    adjugate
    """
    out = self._new(self.rows, self.cols,
                    lambda i, j: self.cofactor(i, j, method))
    return out

def cofactor(self, i, j, method="berkowitz"):
    """Calculate the cofactor of an element.

    The cofactor is the (i, j) minor with sign (-1)**(i + j).

    See Also
    ========

    cofactorMatrix
    minorEntry
    minorMatrix
    """
    if (i + j) % 2 == 0:
        return self.minorEntry(i, j, method)
    else:
        return -1*self.minorEntry(i, j, method)

def jacobian(self, X):
    """Calculates the Jacobian matrix (derivative of a vectorial function).

    Parameters
    ==========

    self : vector of expressions representing functions f_i(x_1, ..., x_n).
    X : set of x_i's in order, it can be a list or a Matrix

    Both self and X can be a row or a column matrix in any order
    (i.e., jacobian() should always work).

    Examples
    ========

    >>> from sympy import sin, cos, Matrix
    >>> from sympy.abc import rho, phi
    >>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
    >>> Y = Matrix([rho, phi])
    >>> X.jacobian(Y)
    Matrix([
    [cos(phi), -rho*sin(phi)],
    [sin(phi),  rho*cos(phi)],
    [   2*rho,             0]])
    >>> X = Matrix([rho*cos(phi), rho*sin(phi)])
    >>> X.jacobian(Y)
    Matrix([
    [cos(phi), -rho*sin(phi)],
    [sin(phi),  rho*cos(phi)]])

    See Also
    ========

    hessian
    wronskian
    """
    if not isinstance(X, MatrixBase):
        X = self._new(X)
    # Both X and self can be a row or a column matrix, so we need to make
    # sure all valid combinations work, but everything else fails:
    if self.shape[0] == 1:
        m = self.shape[1]
    elif self.shape[1] == 1:
        m = self.shape[0]
    else:
        raise TypeError("self must be a row or a column matrix")
    if X.shape[0] == 1:
        n = X.shape[1]
    elif X.shape[1] == 1:
        n = X.shape[0]
    else:
        raise TypeError("X must be a row or a column matrix")

    # m is the number of functions and n is the number of variables
    # computing the Jacobian is now easy:
    return self._new(m, n, lambda j, i: self[j].diff(X[i]))

def QRsolve(self, b):
    """Solve the linear system 'Ax = b'.

    'self' is the matrix 'A', the method argument is the vector
    'b'.  The method returns the solution vector 'x'.  If 'b' is a
    matrix, the system is solved for each column of 'b' and the
    return value is a matrix of the same shape as 'b'.

    This method is slower (approximately by a factor of 2) but
    more stable for floating-point arithmetic than the LUsolve method.
    However, LUsolve usually uses an exact arithmetic, so you don't need
    to use QRsolve.

    This is mainly for educational purposes and symbolic matrices, for real
    (or complex) matrices use mpmath.qr_solve.

    See Also
    ========

    lower_triangular_solve
    upper_triangular_solve
    gauss_jordan_solve
    cholesky_solve
    diagonal_solve
    LDLsolve
    LUsolve
    pinv_solve
    QRdecomposition
    """
    Q, R = self.as_mutable().QRdecomposition()
    y = Q.T*b

    # back substitution to solve R*x = y:
    # We build up the result "backwards" in the vector 'x' and reverse it
    # only in the end.
    x = []
    n = R.rows
    for j in range(n - 1, -1, -1):
        tmp = y[j, :]
        for k in range(j + 1, n):
            tmp -= R[j, k]*x[n - 1 - k]
        x.append(tmp / R[j, j])
    return self._new([row._mat for row in reversed(x)])

def cross(self, b):
    """Return the cross product of `self` and `b` relaxing the condition
    of compatible dimensions: if each has 3 elements, a matrix of the
    same type and shape as `self` will be returned. If `b` has the same
    shape as `self` then common identities for the cross product (like
    `a x b = - b x a`) will hold.

    See Also
    ========

    dot
    multiply
    multiply_elementwise
    """
    if not is_sequence(b):
        raise TypeError(
            "`b` must be an ordered iterable or Matrix, not %s." % type(b))
    if not (self.rows * self.cols == b.rows * b.cols == 3):
        raise ShapeError("Dimensions incorrect for cross product.")
    else:
        return self._new(self.rows, self.cols, (
            (self[1]*b[2] - self[2]*b[1]),
            (self[2]*b[0] - self[0]*b[2]),
            (self[0]*b[1] - self[1]*b[0])))

def dot(self, b):
    """Return the dot product of Matrix self and b relaxing the condition
    of compatible dimensions: if either the number of rows or columns are
    the same as the length of b then the dot product is returned. If self
    is a row or column vector, a scalar is returned. Otherwise, a list
    of results is returned (and in that case the number of columns in self
    must match the length of b).

    Examples
    ========

    >>> from sympy import Matrix
    >>> M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> v = [1, 1, 1]
    >>> M.row(0).dot(v)
    6
    >>> M.col(0).dot(v)
    12
    >>> M.dot(v)
    [6, 15, 24]

    See Also
    ========

    cross
    multiply
    multiply_elementwise
    """
    from .dense import Matrix

    if not isinstance(b, MatrixBase):
        if is_sequence(b):
            if len(b) != self.cols and len(b) != self.rows:
                raise ShapeError(
                    "Dimensions incorrect for dot product.")
            # recurse with b converted to a Matrix
            return self.dot(Matrix(b))
        else:
            raise TypeError(
                "`b` must be an ordered iterable or Matrix, not %s." %
                type(b))

    mat = self
    if mat.cols == b.rows:
        if b.cols != 1:
            mat = mat.T
            b = b.T
        prod = flatten((mat*b).tolist())
        if len(prod) == 1:
            return prod[0]
        return prod
    if mat.cols == b.cols:
        return mat.dot(b.T)
    elif mat.rows == b.rows:
        return mat.T.dot(b)
    else:
        raise ShapeError("Dimensions incorrect for dot product.")

def normalized(self):
    """Return the normalized version of ``self``.

    See Also
    ========

    norm
    """
    if self.rows != 1 and self.cols != 1:
        raise ShapeError("A Matrix must be a vector to normalize.")
    norm = self.norm()
    out = self.applyfunc(lambda i: i / norm)
    return out

def exp(self):
    """Return the exponentiation of a square matrix."""
    if not self.is_square:
        raise NonSquareMatrixError(
            "Exponentiation is valid only for square matrices")
    try:
        P, cells = self.jordan_cells()
    except MatrixError:
        raise NotImplementedError(
            "Exponentiation is implemented only for matrices for which the Jordan normal form can be computed")

    def _jblock_exponential(b):
        # This function computes the matrix exponential for one single
        # Jordan block
        nr = b.rows
        l = b[0, 0]
        if nr == 1:
            res = exp(l)
        else:
            from sympy import eye
            # extract the diagonal part
            d = b[0, 0]*eye(nr)
            # and the nilpotent part
            n = b - d
            # compute its exponential (the series terminates since n is
            # nilpotent: n**nr == 0)
            nex = eye(nr)
            for i in range(1, nr):
                nex = nex + n**i / factorial(i)
            # combine the two parts
            res = exp(b[0, 0])*nex
        return res

    blocks = list(map(_jblock_exponential, cells))
    from sympy.matrices import diag
    eJ = diag(*blocks)
    ret = P*eJ*P.inv()
    return type(self)(ret)

@property
def is_square(self):
    """Checks if a matrix is square.

    A matrix is square if the number of rows equals the number of columns.
    The empty matrix is square by definition, since the number of rows and
    the number of columns are both zero.

    Examples
    ========

    >>> from sympy import Matrix
    >>> a = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> c = Matrix([])
    >>> a.is_square
    False
    >>> b.is_square
    True
    >>> c.is_square
    True
    """
    return self.rows == self.cols

@property
def is_zero(self):
    """Checks if a matrix is a zero matrix.

    A matrix is zero if every element is zero.  A matrix need not be square
    to be considered zero.  The empty matrix is zero by the principle of
    vacuous truth.  For a matrix that may or may not be zero (e.g.
    contains a symbol), this will be None

    Examples
    ========

    >>> from sympy import Matrix, zeros
    >>> from sympy.abc import x
    >>> a = Matrix([[0, 0], [0, 0]])
    >>> b = zeros(3, 4)
    >>> c = Matrix([[0, 1], [0, 0]])
    >>> d = Matrix([])
    >>> e = Matrix([[x, 0], [0, 0]])
    >>> a.is_zero
    True
    >>> b.is_zero
    True
    >>> c.is_zero
    False
    >>> d.is_zero
    True
    >>> e.is_zero
    """
    # Three-valued logic: False beats None beats True.
    if any(i.is_zero == False for i in self):
        return False
    if any(i.is_zero == None for i in self):
        return None
    return True

def det(self, method="bareis"):
    """Computes the matrix determinant using the method "method".

    Possible values for "method":
      bareis ... det_bareis
      berkowitz ... berkowitz_det
      det_LU ... det_LU_decomposition

    See Also
    ========

    det_bareis
    berkowitz_det
    det_LU
    """
    # if methods were made internal and all determinant calculations
    # passed through here, then these lines could be factored out of
    # the method routines
    if not self.is_square:
        raise NonSquareMatrixError()
    if not self:
        return S.One
    if method == "bareis":
        return self.det_bareis()
    elif method == "berkowitz":
        return self.berkowitz_det()
    elif method == "det_LU":
        return self.det_LU_decomposition()
    else:
        raise ValueError("Determinant method '%s' unrecognized" % method)

def det_bareis(self):
    """Compute matrix determinant using Bareis' fraction-free
    algorithm which is an extension of the well known Gaussian
    elimination method. This approach is best suited for dense
    symbolic matrices and will result in a determinant with
    minimal number of fractions. It means that less term
    rewriting is needed on resulting formulae.

    TODO: Implement algorithm for sparse matrices (SFF),
    http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.

    See Also
    ========

    det
    berkowitz_det
    """
    if not self.is_square:
        raise NonSquareMatrixError()
    if not self:
        return S.One

    M, n = self.copy().as_mutable(), self.rows

    if n == 1:
        det = M[0, 0]
    elif n == 2:
        det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
    elif n == 3:
        det = (M[0, 0]*M[1, 1]*M[2, 2] +
               M[0, 1]*M[1, 2]*M[2, 0] +
               M[0, 2]*M[1, 0]*M[2, 1]) - \
              (M[0, 2]*M[1, 1]*M[2, 0] +
               M[0, 0]*M[1, 2]*M[2, 1] +
               M[0, 1]*M[1, 0]*M[2, 2])
    else:
        sign = 1  # track current sign in case of row swaps
        for k in range(n - 1):
            # look for a pivot in the current column
            # and assume det == 0 if none is found
            if M[k, k] == 0:
                for i in range(k + 1, n):
                    if M[i, k]:
                        M.row_swap(i, k)
                        sign *= -1
                        break
                else:
                    return S.Zero
            # proceed with Bareis' fraction-free (FF)
            # form of Gaussian elimination algorithm
            for i in range(k + 1, n):
                for j in range(k + 1, n):
                    D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
                    if k > 0:
                        # exact division by the previous pivot (FF step)
                        D /= M[k - 1, k - 1]
                    if D.is_Atom:
                        M[i, j] = D
                    else:
                        M[i, j] = cancel(D)
        det = sign*M[n - 1, n - 1]

    return det.expand()

def det_LU_decomposition(self):
    """Compute matrix determinant using LU decomposition

    Note that this method fails if the LU decomposition itself
    fails. In particular, if the matrix has no inverse this method
    will fail.

    TODO: Implement algorithm for sparse matrices (SFF),
    http://www.eecis.udel.edu/~saunders/papers/sffge/it5.ps.

    See Also
    ========

    det
    det_bareis
    berkowitz_det
    """
    if not self.is_square:
        raise NonSquareMatrixError()
    if not self:
        return S.One

    M, n = self.copy(), self.rows
    prod = 1
    l, u, p = M.LUdecomposition()
    # each row swap recorded in p flips the sign of the determinant
    if len(p) % 2:
        prod = -1
    for k in range(n):
        prod = prod*u[k, k]*l[k, k]
    return prod.expand()

def adjugate(self, method="berkowitz"):
    """Returns the adjugate matrix.

    Adjugate matrix is the transpose of the cofactor matrix.

    http://en.wikipedia.org/wiki/Adjugate

    See Also
    ========

    cofactorMatrix
    transpose
    berkowitz
    """
    return self.cofactorMatrix(method).T

def inverse_GE(self, iszerofunc=_iszero):
    """Calculates the inverse using Gaussian elimination.

    See Also
    ========

    inv
    inverse_LU
    inverse_ADJ
    """
    from .dense import Matrix
    if not self.is_square:
        raise NonSquareMatrixError("A Matrix must be square to invert.")

    # row-reduce the augmented matrix [self | I]; the right half becomes
    # the inverse when the left half reduces to I
    big = Matrix.hstack(self.as_mutable(), Matrix.eye(self.rows))
    red = big.rref(iszerofunc=iszerofunc, simplify=True)[0]
    if any(iszerofunc(red[j, j]) for j in range(red.rows)):
        raise ValueError("Matrix det == 0; not invertible.")
    return self._new(red[:, big.rows:])

def inverse_ADJ(self, iszerofunc=_iszero):
    """Calculates the inverse using the adjugate matrix and a determinant.

    See Also
    ========

    inv
    inverse_LU
    inverse_GE
    """
    if not self.is_square:
        raise NonSquareMatrixError("A Matrix must be square to invert.")

    d = self.berkowitz_det()
    zero = d.equals(0)
    if zero is None:
        # if equals() can't decide, will rref be able to?
        ok = self.rref(simplify=True)[0]
        zero = any(iszerofunc(ok[j, j]) for j in range(ok.rows))
    if zero:
        raise ValueError("Matrix det == 0; not invertible.")

    return self.adjugate() / d

def berkowitz_charpoly(self, x=Dummy('lambda'), simplify=_simplify):
    """Computes characteristic polynomial minors using Berkowitz method.

    A PurePoly is returned so using different variables for ``x`` does
    not affect the comparison or the polynomials:

    Examples
    ========

    >>> from sympy import Matrix
    >>> from sympy.abc import x, y
    >>> A = Matrix([[1, 3], [2, 0]])
    >>> A.berkowitz_charpoly(x) == A.berkowitz_charpoly(y)
    True

    Specifying ``x`` is optional; a Dummy with name ``lambda`` is used by
    default (which looks good when pretty-printed in unicode):

    >>> A.berkowitz_charpoly().as_expr()
    _lambda**2 - _lambda - 6

    No test is done to see that ``x`` doesn't clash with an existing
    symbol, so using the default (``lambda``) or your own Dummy symbol is
    the safest option:

    >>> A = Matrix([[1, 2], [x, 0]])
    >>> A.charpoly().as_expr()
    _lambda**2 - _lambda - 2*x
    >>> A.charpoly(x).as_expr()
    x**2 - 3*x

    See Also
    ========

    berkowitz
    """
    return PurePoly(list(map(simplify, self.berkowitz()[-1])), x)

# ``charpoly`` is the public alias for the Berkowitz implementation.
charpoly = berkowitz_charpoly

def berkowitz_eigenvals(self, **flags):
    """Computes eigenvalues of a Matrix using Berkowitz method.

    See Also
    ========

    berkowitz
    """
    return roots(self.berkowitz_charpoly(Dummy('x')), **flags)

def eigenvals(self, **flags):
    """Return eigen values using the berkowitz_eigenvals routine.

    Since the roots routine doesn't always work well with Floats,
    they will be replaced with Rationals before calling that
    routine. If this is not desired, set flag ``rational`` to False.
    """
    # roots doesn't like Floats, so replace them with Rationals
    # unless the nsimplify flag indicates that this has already
    # been done, e.g. in eigenvects
    mat = self
    if flags.pop('rational', True):
        if any(v.has(Float) for v in mat):
            mat = mat._new(mat.rows, mat.cols,
                           [nsimplify(v, rational=True) for v in mat])

    flags.pop('simplify', None)  # pop unsupported flag
    return mat.berkowitz_eigenvals(**flags)

def eigenvects(self, **flags):
    """Return list of triples (eigenval, multiplicity, basis).

    The flag ``simplify`` has two effects:
        1) if bool(simplify) is True, as_content_primitive()
        will be used to tidy up normalization artifacts;
        2) if nullspace needs simplification to compute the
        basis, the simplify flag will be passed on to the
        nullspace routine which will interpret it there.

    If the matrix contains any Floats, they will be changed to Rationals
    for computation purposes, but the answers will be returned after being
    evaluated with evalf. If it is desired to removed small imaginary
    portions during the evalf step, pass a value for the ``chop`` flag.
    """
    from sympy.matrices import eye

    simplify = flags.get('simplify', True)
    primitive = bool(flags.get('simplify', False))
    chop = flags.pop('chop', False)

    flags.pop('multiple', None)  # remove this if it's there

    # roots doesn't like Floats, so replace them with Rationals
    float = False
    mat = self
    if any(v.has(Float) for v in self):
        float = True
        mat = mat._new(mat.rows, mat.cols,
                       [nsimplify(v, rational=True) for v in mat])
        flags['rational'] = False  # to tell eigenvals not to do this

    out, vlist = [], mat.eigenvals(**flags)
    vlist = list(vlist.items())
    vlist.sort(key=default_sort_key)
    flags.pop('rational', None)

    for r, k in vlist:
        tmp = mat.as_mutable() - eye(mat.rows)*r
        basis = tmp.nullspace()
        # whether tmp.is_symbolic() is True or False, it is possible that
        # the basis will come back as [] in which case simplification is
        # necessary.
        if not basis:
            # The nullspace routine failed, try it again with simplification
            basis = tmp.nullspace(simplify=simplify)
            if not basis:
                raise NotImplementedError(
                    "Can't evaluate eigenvector for eigenvalue %s" % r)
        if primitive:
            # the relationship A*e = lambda*e will still hold if we change
            # the eigenvector; so if simplify is True we tidy up any
            # normalization artifacts with as_content_primitive (default)
            # and remove any pure Integer denominators.
            l = 1
            for i, b in enumerate(basis[0]):
                c, p = signsimp(b).as_content_primitive()
                if c is not S.One:
                    b = c*p
                    l = ilcm(l, c.q)
                basis[0][i] = b
            if l != 1:
                basis[0] *= l
        if float:
            out.append((r.evalf(chop=chop), k,
                        [mat._new(b).evalf(chop=chop) for b in basis]))
        else:
            out.append((r, k, [mat._new(b) for b in basis]))
    return out

def __getattr__(self, attr):
    """Dispatch elementwise ``diff``/``integrate``/``limit`` through
    ``applyfunc``; raise AttributeError for anything else."""
    if attr in ('diff', 'integrate', 'limit'):
        def doit(*args):
            item_doit = lambda item: getattr(item, attr)(*args)
            return self.applyfunc(item_doit)
        return doit
    else:
        raise AttributeError(
            "%s has no attribute %s." % (self.__class__.__name__, attr))

def _diagonalize_clear_subproducts(self):
    # drop memoized intermediates from a previous diagonalize() run
    del self._is_symbolic
    del self._is_symmetric
    del self._eigenvects


def jordan_cell(self, eigenval, n):
    """Return an n x n Jordan block for ``eigenval`` (eigenval on the
    diagonal, 1 on the superdiagonal), with the same type as self."""
    n = int(n)
    from sympy.matrices import MutableMatrix
    out = MutableMatrix.zeros(n)
    for i in range(n - 1):
        out[i, i] = eigenval
        out[i, i + 1] = 1
    out[n - 1, n - 1] = eigenval
    return type(self)(out)


def _jordan_block_structure(self):
    # To every eigenvalue may belong `i` blocks with size s(i)
    # and a chain of generalized eigenvectors
    # which will be determined by the following computations:
    # for every eigenvalue we will add a dictionary
    # containing, for all blocks, the blocksizes and the attached chain
    # vectors that will eventually be used to form the transformation P
    jordan_block_structures = {}
    _eigenvects = self.eigenvects()
    ev = self.eigenvals()
    if len(ev) == 0:
        raise AttributeError("could not compute the eigenvalues")
    for eigenval, multiplicity, vects in _eigenvects:
        l_jordan_chains = {}
        geometrical = len(vects)
        if geometrical == multiplicity:
            # The Jordan chains have all length 1 and consist of only one
            # vector which is the eigenvector of course
            chains = []
            for v in vects:
                chain = [v]
                chains.append(chain)
            l_jordan_chains[1] = chains
            jordan_block_structures[eigenval] = l_jordan_chains
        elif geometrical == 0:
            raise MatrixError(
                "Matrix has the eigen vector with geometrical multiplicity equal zero.")
        else:
            # Up to now we know nothing about the sizes of the blocks of our
            # Jordan matrix. Note that knowledge of algebraic and geometrical
            # multiplicity will *NOT* be sufficient to determine this
            # structure. The blocksize `s` could be defined as the minimal
            # `k` where `kernel(self-lI)^k = kernel(self-lI)^(k+1)`.
            # The extreme case would be that k = (multiplicity-geometrical+1)
            # but the blocks could be smaller.
            from sympy.matrices import MutableMatrix
            I = MutableMatrix.eye(self.rows)
            l = eigenval
            M = (self - l*I)

            # We store the powers `Ms[k] = (self-l*I)**k` (with Ms[0] = I),
            # the vectors spanning their nullspaces (Ns[0] = []) and the
            # nullspace dimensions `a`; a[0] = 0 since I is regular.
            l_jordan_chains = {}
            Ms = [I]
            Ns = [[]]
            a = [0]
            smax = 0
            M_new = Ms[-1]*M
            Ns_new = M_new.nullspace()
            a_new = len(Ns_new)
            Ms.append(M_new)
            Ns.append(Ns_new)
            while a_new > a[-1]:
                # as long as the nullspaces increase compute further powers
                a.append(a_new)
                M_new = Ms[-1]*M
                Ns_new = M_new.nullspace()
                a_new = len(Ns_new)
                Ms.append(M_new)
                Ns.append(Ns_new)
                smax += 1

            # We now have `Ms[-1] = ((self-l*I)**s) = Z = 0` and know the
            # size `s` of the biggest Jordan block associated with `l`.
            # Jordan chains are built starting from the largest block size:
            # choose chain leaders `e0` in
            # `Kernel((self-lI)^s) / Kernel((self-lI)^(s-1))`
            # and generate the rest of each chain via
            # `e_(k+1) = (self-lI) e_k`.  Working from the nullspace side
            # avoids solving the singular system directly.
            for s in reversed(range(1, smax + 1)):
                S = Ms[s]
                # We want the vectors in `Kernel((self-lI)^s)` but without
                # those in `Kernel((self-lI)^(s-1))`, so we add their
                # adjoints as additional equations to `S` to get the
                # orthogonal complement. (`S` will no longer be square.)
                exclude_vectors = Ns[s - 1]
                for k in range(0, a[s - 1]):
                    S = S.col_join((exclude_vectors[k]).adjoint())
                # Also exclude the level-s vectors of the chains already
                # computed for bigger blocks (that is why we start with the
                # biggest s).
                for chain_list in l_jordan_chains.values():
                    for chain in chain_list:
                        S = S.col_join(chain[s - 1].adjoint())
                e0s = S.nullspace()
                # Determine the number of chain leaders for blocks of size s.
                n_e0 = len(e0s)
                s_chains = []
                for i in range(0, n_e0):
                    chain = [e0s[i]]
                    for k in range(1, s):
                        v = M*chain[k - 1]
                        chain.append(v)
                    # We want the chain leader to appear as the last of the
                    # block.
                    chain.reverse()
                    s_chains.append(chain)
                l_jordan_chains[s] = s_chains
            jordan_block_structures[eigenval] = l_jordan_chains
    return jordan_block_structures

def _jordan_split(self, algebraical, geometrical):
    """Return a list of integers with sum equal to 'algebraical'
    and length equal to 'geometrical'"""
    n1 = algebraical // geometrical
    res = [n1] * geometrical
    # the remainder goes into the last entry
    res[len(res) - 1] += algebraical % geometrical
    assert sum(res) == algebraical
    return res

def dual(self):
    """Returns the dual of a matrix, which is:

    `(1/2)*levicivita(i, j, k, l)*M(k, l)` summed over indices `k` and `l`

    Since the levicivita method is anti_symmetric for any pairwise
    exchange of indices, the dual of a symmetric matrix is the zero
    matrix. Strictly speaking the dual defined here assumes that the
    'matrix' `M` is a contravariant anti_symmetric second rank tensor,
    so that the dual is a covariant second rank tensor.
    """
    from sympy import LeviCivita
    from sympy.matrices import zeros

    M, n = self[:, :], self.rows
    work = zeros(n)
    if self.is_symmetric():
        return work

    for i in range(1, n):
        for j in range(1, n):
            acum = 0
            for k in range(1, n):
                acum += LeviCivita(i, j, 0, k)*M[0, k]
            work[i, j] = acum
            work[j, i] = -acum

    for l in range(1, n):
        acum = 0
        for a in range(1, n):
            for b in range(1, n):
                acum += LeviCivita(0, l, a, b)*M[a, b]
        acum /= 2
        work[0, l] = -acum
        work[l, 0] = acum

    return work

def pinv(self):
    """Calculate the Moore-Penrose pseudoinverse of the matrix.

    The Moore-Penrose pseudoinverse exists and is unique for any matrix.
    If the matrix is invertible, the pseudoinverse is the same as the
    inverse.

    Examples
    ========

    >>> from sympy import Matrix
    >>> Matrix([[1, 2, 3], [4, 5, 6]]).pinv()
    Matrix([
    [-17/18,  4/9],
    [  -1/9,  1/9],
    [ 13/18, -2/9]])

    See Also
    ========

    inv
    pinv_solve

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse
    """
    A = self
    AH = self.H
    # Trivial case: pseudoinverse of all-zero matrix is its transpose.
    if A.is_zero:
        return AH
    try:
        if self.rows >= self.cols:
            return (AH*A).inv()*AH
        else:
            return AH*(A*AH).inv()
    except ValueError:
        # Matrix is not full rank, so A*AH cannot be inverted.
        raise NotImplementedError('Rank-deficient matrices are not yet '
                                  'supported.')

def pinv_solve(self, B, arbitrary_matrix=None):
    """Solve Ax = B using the Moore-Penrose pseudoinverse.

    There may be zero, one, or infinite solutions.  If one solution
    exists, it will be returned.  If infinite solutions exist, one will
    be returned based on the value of arbitrary_matrix.  If no solutions
    exist, the least-squares solution is returned.

    Parameters
    ==========

    B : Matrix
        The right hand side of the equation to be solved for.  Must have
        the same number of rows as matrix A.
    arbitrary_matrix : Matrix
        If the system is underdetermined (e.g. A has more columns than
        rows), infinite solutions are possible, in terms of an arbitrary
        matrix.  This parameter may be set to a specific matrix to use
        for that purpose; if so, it must be the same shape as x, with as
        many rows as matrix A has columns, and as many columns as matrix
        B.  If left as None, an appropriate matrix containing dummy
        symbols in the form of ``wn_m`` will be used, with n and m being
        row and column position of each symbol.

    Returns
    =======

    x : Matrix
        The matrix that will satisfy Ax = B.  Will have as many rows as
        matrix A has columns, and as many columns as matrix B.

    Examples
    ========

    >>> from sympy import Matrix
    >>> A = Matrix([[1, 2, 3], [4, 5, 6]])
    >>> B = Matrix([7, 8])
    >>> A.pinv_solve(B)
    Matrix([
    [ _w0_0/6 - _w1_0/3 + _w2_0/6 - 55/18],
    [-_w0_0/3 + 2*_w1_0/3 - _w2_0/3 + 1/9],
    [ _w0_0/6 - _w1_0/3 + _w2_0/6 + 59/18]])
    >>> A.pinv_solve(B, arbitrary_matrix=Matrix([0, 0, 0]))
    Matrix([
    [-55/18],
    [  1/9],
    [ 59/18]])

    See Also
    ========

    lower_triangular_solve
    upper_triangular_solve
    gauss_jordan_solve
    cholesky_solve
    diagonal_solve
    LDLsolve
    LUsolve
    QRsolve
    pinv

    Notes
    =====

    This may return either exact solutions or least squares solutions.
    To determine which, check ``A * A.pinv() * B == B``.  It will be
    True if exact solutions exist, and False if only a least-squares
    solution exists.  Be aware that the left hand side of that equation
    may need to be simplified to correctly compare to the right hand
    side.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse#Obtaining_all_solutions_of_a_linear_system
    """
    from sympy.matrices import eye
    A = self
    A_pinv = self.pinv()
    if arbitrary_matrix is None:
        rows, cols = A.cols, B.cols
        w = symbols('w:{0}_:{1}'.format(rows, cols), cls=Dummy)
        arbitrary_matrix = self.__class__(cols, rows, w).T
    return A_pinv*B + (eye(A.cols) - A_pinv*A)*arbitrary_matrix

def gauss_jordan_solve(self, b, freevar=False):
    """Solves Ax = b using Gauss Jordan elimination.

    There may be zero, one, or infinite solutions.  If one solution
    exists, it will be returned. If infinite solutions exist, it will
    be returned parametrically. If no solutions exist, It will throw
    ValueError.

    Parameters
    ==========

    b : Matrix
        The right hand side of the equation to be solved for.  Must have
        the same number of rows as matrix A.

    freevar : List
        If the system is underdetermined (e.g. A has more columns than
        rows), infinite solutions are possible, in terms of an arbitrary
        values of free variables. Then the index of the free variables
        in the solutions (column Matrix) will be returned by freevar, if
        the flag `freevar` is set to `True`.

    Returns
    =======

    x : Matrix
        The matrix that will satisfy Ax = B.  Will have as many rows as
        matrix A has columns, and as many columns as matrix B.

    params : Matrix
        If the system is underdetermined (e.g. A has more columns than
        rows), infinite solutions are possible, in terms of an arbitrary
        parameters. These arbitrary parameters are returned as params
        Matrix.

    Examples
    ========

    >>> from sympy import Matrix
    >>> A = Matrix([[1, 2, 1, 1], [1, 2, 2, -1], [2, 4, 0, 6]])
    >>> b = Matrix([7, 12, 4])
    >>> sol, params = A.gauss_jordan_solve(b)
    >>> sol
    Matrix([
    [-2*_tau0 - 3*_tau1 + 2],
    [                 _tau0],
    [           2*_tau1 + 5],
    [                 _tau1]])
    >>> params
    Matrix([
    [_tau0],
    [_tau1]])

    >>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
    >>> b = Matrix([3, 6, 9])
    >>> sol, params = A.gauss_jordan_solve(b)
    >>> sol
    Matrix([
    [-1],
    [ 2],
    [ 0]])
    >>> params
    Matrix(0, 1, [])

    See Also
    ========

    lower_triangular_solve
    upper_triangular_solve
    cholesky_solve
    diagonal_solve
    LDLsolve
    LUsolve
    QRsolve
    pinv

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Gaussian_elimination
    """
    from sympy.matrices import Matrix, zeros

    aug = self.hstack(self.copy(), b.copy())
    row, col = aug[:, :-1].shape

    # solve by reduced row echelon form
    A, pivots = aug.rref(simplify=True)
    A, v = A[:, :-1], A[:, -1]
    pivots = list(filter(lambda p: p < col, pivots))
    rank = len(pivots)

    # Bring to block form
    permutation = Matrix(range(col)).T
    A = A.vstack(A, permutation)

    for i, c in enumerate(pivots):
        A.col_swap(i, c)

    A, permutation = A[:-1, :], A[-1, :]

    # check for existence of solutions
    # rank of aug Matrix should be equal to rank of coefficient matrix
    if not v[rank:, 0].is_zero:
        raise ValueError("Linear system has no solution")

    # Get index of free symbols (free parameters)
    # non-pivots columns are free variables
    free_var_index = permutation[len(pivots):]

    # Free parameters
    dummygen = numbered_symbols("tau", Dummy)
    tau = Matrix([next(dummygen) for k in range(col - rank)]).reshape(
        col - rank, 1)

    # Full parametric solution
    V = A[:rank, rank:]
    vt = v[:rank, 0]
    free_sol = tau.vstack(vt - V*tau, tau)

    # Undo permutation
    sol = zeros(col, 1)
    for k, v in enumerate(free_sol):
        sol[permutation[k], 0] = v

    if freevar:
        return sol, tau, free_var_index
    else:
        return sol, tau

def a2idx(j, n=None):
    """Return integer after making positive and validating against n."""
    if type(j) is not int:
        try:
            # accept anything implementing the index protocol (PEP 357)
            j = j.__index__()
        except AttributeError:
            raise IndexError("Invalid index a[%r]" % (j,))
    if n is not None:
        # normalize negative indices and bounds-check against n
        if j < 0:
            j += n
        if not (j >= 0 and j < n):
            raise IndexError("Index out of range: a[%s]" % (j,))
    return int(j)