aboutsummaryrefslogtreecommitdiff
path: root/SRC
diff options
context:
space:
mode:
authorjulie <julielangou@users.noreply.github.com>2011-03-10 19:09:21 +0000
committerjulie <julielangou@users.noreply.github.com>2011-03-10 19:09:21 +0000
commit1923c69f7e9857d19b67f9e9befd1b1f37c644ef (patch)
treea0729e3779db5c74c0dcda417a0d6eff5c8b3f95 /SRC
parent86579dddaf0142000a291a7fdac0850278b56317 (diff)
Formatting code, use of * for comments, and follow other LAPACK style
Diffstat (limited to 'SRC')
-rw-r--r--SRC/chfrk.f119
-rw-r--r--SRC/chgeqz.f2
-rw-r--r--SRC/clag2z.f3
-rw-r--r--SRC/clanhf.f14
-rw-r--r--SRC/clarrv.f6
-rw-r--r--SRC/cpftrf.f66
-rw-r--r--SRC/cpftri.f36
-rw-r--r--SRC/cpftrs.f10
-rw-r--r--SRC/ctfsm.f245
-rw-r--r--SRC/ctftri.f84
-rw-r--r--SRC/ctfttp.f4
-rw-r--r--SRC/ctfttr.f4
-rw-r--r--SRC/ctpttf.f4
-rw-r--r--SRC/ctrttf.f4
-rw-r--r--SRC/dgejsv.f166
-rw-r--r--SRC/dgesvj.f442
-rw-r--r--SRC/dgsvj0.f344
-rw-r--r--SRC/dgsvj1.f218
-rw-r--r--SRC/dlag2s.f2
-rw-r--r--SRC/dlansf.f14
-rw-r--r--SRC/dlarrv.f6
-rw-r--r--SRC/dlasq4.f3
-rw-r--r--SRC/dlat2s.f6
-rw-r--r--SRC/dpftrf.f66
-rw-r--r--SRC/dpftri.f36
-rw-r--r--SRC/dpftrs.f10
-rw-r--r--SRC/dsfrk.f119
-rw-r--r--SRC/dsgesv.f26
-rw-r--r--SRC/dsposv.f24
-rw-r--r--SRC/dtfsm.f242
-rw-r--r--SRC/dtftri.f84
-rw-r--r--SRC/dtfttp.f4
-rw-r--r--SRC/dtfttr.f4
-rw-r--r--SRC/dtpttf.f4
-rw-r--r--SRC/dtrttf.f4
-rw-r--r--SRC/ieeeck.f2
-rw-r--r--SRC/sgejsv.f174
-rw-r--r--SRC/sgesvj.f442
-rw-r--r--SRC/sgsvj0.f344
-rw-r--r--SRC/sgsvj1.f218
-rw-r--r--SRC/slag2d.f3
-rw-r--r--SRC/slansf.f14
-rw-r--r--SRC/slarrv.f6
-rw-r--r--SRC/slasq4.f3
-rw-r--r--SRC/spftrf.f66
-rw-r--r--SRC/spftri.f36
-rw-r--r--SRC/spftrs.f10
-rw-r--r--SRC/ssfrk.f120
-rw-r--r--SRC/stfsm.f242
-rw-r--r--SRC/stftri.f84
-rw-r--r--SRC/stfttp.f4
-rw-r--r--SRC/stfttr.f4
-rw-r--r--SRC/stpttf.f4
-rw-r--r--SRC/strttf.f4
-rw-r--r--SRC/zcgesv.f28
-rw-r--r--SRC/zcposv.f26
-rw-r--r--SRC/zheevr.f12
-rw-r--r--SRC/zhfrk.f120
-rw-r--r--SRC/zlag2c.f8
-rw-r--r--SRC/zlanhf.f14
-rw-r--r--SRC/zlarrv.f6
-rw-r--r--SRC/zlat2c.f14
-rw-r--r--SRC/zpftrf.f66
-rw-r--r--SRC/zpftri.f36
-rw-r--r--SRC/zpftrs.f10
-rw-r--r--SRC/ztfsm.f245
-rw-r--r--SRC/ztftri.f84
-rw-r--r--SRC/ztfttp.f4
-rw-r--r--SRC/ztfttr.f4
-rw-r--r--SRC/ztpttf.f4
-rw-r--r--SRC/ztrttf.f4
71 files changed, 2447 insertions, 2443 deletions
diff --git a/SRC/chfrk.f b/SRC/chfrk.f
index 407015ec..8e85bfc5 100644
--- a/SRC/chfrk.f
+++ b/SRC/chfrk.f
@@ -1,5 +1,5 @@
SUBROUTINE CHFRK( TRANSR, UPLO, TRANS, N, K, ALPHA, A, LDA, BETA,
- + C )
+ $ C )
*
* -- LAPACK routine (version 3.3.0) --
*
@@ -107,8 +107,7 @@
* parts of the diagonal elements need not be set, they are
* assumed to be zero, and on exit they are set to zero.
*
-* Arguments
-* ==========
+* =====================================================================
*
* ..
* .. Parameters ..
@@ -172,7 +171,7 @@
* done (it is in CHERK for example) and left in the general case.
*
IF( ( N.EQ.0 ) .OR. ( ( ( ALPHA.EQ.ZERO ) .OR. ( K.EQ.0 ) ) .AND.
- + ( BETA.EQ.ONE ) ) )RETURN
+ $ ( BETA.EQ.ONE ) ) )RETURN
*
IF( ( ALPHA.EQ.ZERO ) .AND. ( BETA.EQ.ZERO ) ) THEN
DO J = 1, ( ( N*( N+1 ) ) / 2 )
@@ -219,22 +218,22 @@
* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'N'
*
CALL CHERK( 'L', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N )
+ $ BETA, C( 1 ), N )
CALL CHERK( 'U', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( N+1 ), N )
+ $ BETA, C( N+1 ), N )
CALL CGEMM( 'N', 'C', N2, N1, K, CALPHA, A( N1+1, 1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( N1+1 ), N )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( N1+1 ), N )
*
ELSE
*
* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'C'
*
CALL CHERK( 'L', 'C', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N )
+ $ BETA, C( 1 ), N )
CALL CHERK( 'U', 'C', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( N+1 ), N )
+ $ BETA, C( N+1 ), N )
CALL CGEMM( 'C', 'N', N2, N1, K, CALPHA, A( 1, N1+1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( N1+1 ), N )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( N1+1 ), N )
*
END IF
*
@@ -247,22 +246,22 @@
* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'N'
*
CALL CHERK( 'L', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2+1 ), N )
+ $ BETA, C( N2+1 ), N )
CALL CHERK( 'U', 'N', N2, K, ALPHA, A( N2, 1 ), LDA,
- + BETA, C( N1+1 ), N )
+ $ BETA, C( N1+1 ), N )
CALL CGEMM( 'N', 'C', N1, N2, K, CALPHA, A( 1, 1 ),
- + LDA, A( N2, 1 ), LDA, CBETA, C( 1 ), N )
+ $ LDA, A( N2, 1 ), LDA, CBETA, C( 1 ), N )
*
ELSE
*
* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'C'
*
CALL CHERK( 'L', 'C', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2+1 ), N )
+ $ BETA, C( N2+1 ), N )
CALL CHERK( 'U', 'C', N2, K, ALPHA, A( 1, N2 ), LDA,
- + BETA, C( N1+1 ), N )
+ $ BETA, C( N1+1 ), N )
CALL CGEMM( 'C', 'N', N1, N2, K, CALPHA, A( 1, 1 ),
- + LDA, A( 1, N2 ), LDA, CBETA, C( 1 ), N )
+ $ LDA, A( 1, N2 ), LDA, CBETA, C( 1 ), N )
*
END IF
*
@@ -281,24 +280,24 @@
* N is odd, TRANSR = 'C', UPLO = 'L', and TRANS = 'N'
*
CALL CHERK( 'U', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N1 )
+ $ BETA, C( 1 ), N1 )
CALL CHERK( 'L', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( 2 ), N1 )
+ $ BETA, C( 2 ), N1 )
CALL CGEMM( 'N', 'C', N1, N2, K, CALPHA, A( 1, 1 ),
- + LDA, A( N1+1, 1 ), LDA, CBETA,
- + C( N1*N1+1 ), N1 )
+ $ LDA, A( N1+1, 1 ), LDA, CBETA,
+ $ C( N1*N1+1 ), N1 )
*
ELSE
*
* N is odd, TRANSR = 'C', UPLO = 'L', and TRANS = 'C'
*
CALL CHERK( 'U', 'C', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N1 )
+ $ BETA, C( 1 ), N1 )
CALL CHERK( 'L', 'C', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( 2 ), N1 )
+ $ BETA, C( 2 ), N1 )
CALL CGEMM( 'C', 'N', N1, N2, K, CALPHA, A( 1, 1 ),
- + LDA, A( 1, N1+1 ), LDA, CBETA,
- + C( N1*N1+1 ), N1 )
+ $ LDA, A( 1, N1+1 ), LDA, CBETA,
+ $ C( N1*N1+1 ), N1 )
*
END IF
*
@@ -311,22 +310,22 @@
* N is odd, TRANSR = 'C', UPLO = 'U', and TRANS = 'N'
*
CALL CHERK( 'U', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2*N2+1 ), N2 )
+ $ BETA, C( N2*N2+1 ), N2 )
CALL CHERK( 'L', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( N1*N2+1 ), N2 )
+ $ BETA, C( N1*N2+1 ), N2 )
CALL CGEMM( 'N', 'C', N2, N1, K, CALPHA, A( N1+1, 1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), N2 )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), N2 )
*
ELSE
*
* N is odd, TRANSR = 'C', UPLO = 'U', and TRANS = 'C'
*
CALL CHERK( 'U', 'C', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2*N2+1 ), N2 )
+ $ BETA, C( N2*N2+1 ), N2 )
CALL CHERK( 'L', 'C', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( N1*N2+1 ), N2 )
+ $ BETA, C( N1*N2+1 ), N2 )
CALL CGEMM( 'C', 'N', N2, N1, K, CALPHA, A( 1, N1+1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), N2 )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), N2 )
*
END IF
*
@@ -351,24 +350,24 @@
* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'N'
*
CALL CHERK( 'L', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 2 ), N+1 )
+ $ BETA, C( 2 ), N+1 )
CALL CHERK( 'U', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( 1 ), N+1 )
+ $ BETA, C( 1 ), N+1 )
CALL CGEMM( 'N', 'C', NK, NK, K, CALPHA, A( NK+1, 1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( NK+2 ),
- + N+1 )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( NK+2 ),
+ $ N+1 )
*
ELSE
*
* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'C'
*
CALL CHERK( 'L', 'C', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 2 ), N+1 )
+ $ BETA, C( 2 ), N+1 )
CALL CHERK( 'U', 'C', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( 1 ), N+1 )
+ $ BETA, C( 1 ), N+1 )
CALL CGEMM( 'C', 'N', NK, NK, K, CALPHA, A( 1, NK+1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( NK+2 ),
- + N+1 )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( NK+2 ),
+ $ N+1 )
*
END IF
*
@@ -381,24 +380,24 @@
* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'N'
*
CALL CHERK( 'L', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+2 ), N+1 )
+ $ BETA, C( NK+2 ), N+1 )
CALL CHERK( 'U', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( NK+1 ), N+1 )
+ $ BETA, C( NK+1 ), N+1 )
CALL CGEMM( 'N', 'C', NK, NK, K, CALPHA, A( 1, 1 ),
- + LDA, A( NK+1, 1 ), LDA, CBETA, C( 1 ),
- + N+1 )
+ $ LDA, A( NK+1, 1 ), LDA, CBETA, C( 1 ),
+ $ N+1 )
*
ELSE
*
* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'C'
*
CALL CHERK( 'L', 'C', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+2 ), N+1 )
+ $ BETA, C( NK+2 ), N+1 )
CALL CHERK( 'U', 'C', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( NK+1 ), N+1 )
+ $ BETA, C( NK+1 ), N+1 )
CALL CGEMM( 'C', 'N', NK, NK, K, CALPHA, A( 1, 1 ),
- + LDA, A( 1, NK+1 ), LDA, CBETA, C( 1 ),
- + N+1 )
+ $ LDA, A( 1, NK+1 ), LDA, CBETA, C( 1 ),
+ $ N+1 )
*
END IF
*
@@ -417,24 +416,24 @@
* N is even, TRANSR = 'C', UPLO = 'L', and TRANS = 'N'
*
CALL CHERK( 'U', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+1 ), NK )
+ $ BETA, C( NK+1 ), NK )
CALL CHERK( 'L', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( 1 ), NK )
+ $ BETA, C( 1 ), NK )
CALL CGEMM( 'N', 'C', NK, NK, K, CALPHA, A( 1, 1 ),
- + LDA, A( NK+1, 1 ), LDA, CBETA,
- + C( ( ( NK+1 )*NK )+1 ), NK )
+ $ LDA, A( NK+1, 1 ), LDA, CBETA,
+ $ C( ( ( NK+1 )*NK )+1 ), NK )
*
ELSE
*
* N is even, TRANSR = 'C', UPLO = 'L', and TRANS = 'C'
*
CALL CHERK( 'U', 'C', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+1 ), NK )
+ $ BETA, C( NK+1 ), NK )
CALL CHERK( 'L', 'C', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( 1 ), NK )
+ $ BETA, C( 1 ), NK )
CALL CGEMM( 'C', 'N', NK, NK, K, CALPHA, A( 1, 1 ),
- + LDA, A( 1, NK+1 ), LDA, CBETA,
- + C( ( ( NK+1 )*NK )+1 ), NK )
+ $ LDA, A( 1, NK+1 ), LDA, CBETA,
+ $ C( ( ( NK+1 )*NK )+1 ), NK )
*
END IF
*
@@ -447,22 +446,22 @@
* N is even, TRANSR = 'C', UPLO = 'U', and TRANS = 'N'
*
CALL CHERK( 'U', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK*( NK+1 )+1 ), NK )
+ $ BETA, C( NK*( NK+1 )+1 ), NK )
CALL CHERK( 'L', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( NK*NK+1 ), NK )
+ $ BETA, C( NK*NK+1 ), NK )
CALL CGEMM( 'N', 'C', NK, NK, K, CALPHA, A( NK+1, 1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), NK )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), NK )
*
ELSE
*
* N is even, TRANSR = 'C', UPLO = 'U', and TRANS = 'C'
*
CALL CHERK( 'U', 'C', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK*( NK+1 )+1 ), NK )
+ $ BETA, C( NK*( NK+1 )+1 ), NK )
CALL CHERK( 'L', 'C', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( NK*NK+1 ), NK )
+ $ BETA, C( NK*NK+1 ), NK )
CALL CGEMM( 'C', 'N', NK, NK, K, CALPHA, A( 1, NK+1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), NK )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), NK )
*
END IF
*
diff --git a/SRC/chgeqz.f b/SRC/chgeqz.f
index ddf4ee66..91283024 100644
--- a/SRC/chgeqz.f
+++ b/SRC/chgeqz.f
@@ -300,7 +300,7 @@
*
* Quick return if possible
*
-c WORK( 1 ) = CMPLX( 1 )
+* WORK( 1 ) = CMPLX( 1 )
IF( N.LE.0 ) THEN
WORK( 1 ) = CMPLX( 1 )
RETURN
diff --git a/SRC/clag2z.f b/SRC/clag2z.f
index df695c36..d7f2fce9 100644
--- a/SRC/clag2z.f
+++ b/SRC/clag2z.f
@@ -48,7 +48,8 @@
*
* INFO (output) INTEGER
* = 0: successful exit
-* =========
+*
+* =====================================================================
*
* .. Local Scalars ..
INTEGER I, J
diff --git a/SRC/clanhf.f b/SRC/clanhf.f
index 63818d9a..fb89fd5a 100644
--- a/SRC/clanhf.f
+++ b/SRC/clanhf.f
@@ -227,19 +227,19 @@
*
NOE = 1
IF( MOD( N, 2 ).EQ.0 )
- + NOE = 0
+ $ NOE = 0
*
* set ifm = 0 when form='C' or 'c' and 1 otherwise
*
IFM = 1
IF( LSAME( TRANSR, 'C' ) )
- + IFM = 0
+ $ IFM = 0
*
* set ilu = 0 when uplo='U or 'u' and 1 otherwise
*
ILU = 1
IF( LSAME( UPLO, 'U' ) )
- + ILU = 0
+ $ ILU = 0
*
* set lda = (n+1)/2 when ifm = 0
* set lda = n when ifm = 1 and noe = 1
@@ -498,7 +498,7 @@
END IF
END IF
ELSE IF( ( LSAME( NORM, 'I' ) ) .OR. ( LSAME( NORM, 'O' ) ) .OR.
- + ( NORM.EQ.'1' ) ) THEN
+ $ ( NORM.EQ.'1' ) ) THEN
*
* Find normI(A) ( = norm1(A), since A is Hermitian).
*
@@ -524,7 +524,7 @@
* -> A(j+k,j+k)
WORK( J+K ) = S + AA
IF( I.EQ.K+K )
- + GO TO 10
+ $ GO TO 10
I = I + 1
AA = ABS( REAL( A( I+J*LDA ) ) )
* -> A(j,j)
@@ -1037,7 +1037,7 @@
END DO
DO J = 0, K - 2
CALL CLASSQ( K-J-1, A( J+1+( J+K-1 )*LDA ), 1,
- + SCALE, S )
+ $ SCALE, S )
* L at A(0,k-1)
END DO
S = S + S
@@ -1226,7 +1226,7 @@
END DO
DO J = 0, K - 2
CALL CLASSQ( K-J-1, A( J+1+( J+K )*LDA ), 1, SCALE,
- + S )
+ $ S )
* L at A(0,k)
END DO
S = S + S
diff --git a/SRC/clarrv.f b/SRC/clarrv.f
index d996864e..42d7d9aa 100644
--- a/SRC/clarrv.f
+++ b/SRC/clarrv.f
@@ -338,7 +338,7 @@
* high relative accuracy is required for the computation of the
* corresponding eigenvectors.
CALL SCOPY( IM, W( WBEGIN ), 1,
- & WORK( WBEGIN ), 1 )
+ $ WORK( WBEGIN ), 1 )
* We store in W the eigenvalue approximations w.r.t. the original
* matrix T.
@@ -441,7 +441,7 @@
Q = INDEXW( WBEGIN-1+OLDLST )
* Offset for the arrays WORK, WGAP and WERR, i.e., the P-OFFSET
* through the Q-OFFSET elements of these arrays are to be used.
-C OFFSET = P-OLDFST
+* OFFSET = P-OLDFST
OFFSET = INDEXW( WBEGIN ) - 1
* perform limited bisection (if necessary) to get approximate
* eigenvalues to the precision needed.
@@ -580,7 +580,7 @@ C OFFSET = P-OLDFST
* Compute RRR of child cluster.
* Note that the new RRR is stored in Z
*
-C SLARRF needs LWORK = 2*N
+* SLARRF needs LWORK = 2*N
CALL SLARRF( IN, D( IBEGIN ), L( IBEGIN ),
$ WORK(INDLD+IBEGIN-1),
$ NEWFST, NEWLST, WORK(WBEGIN),
diff --git a/SRC/cpftrf.f b/SRC/cpftrf.f
index 62bb7be5..4726f806 100644
--- a/SRC/cpftrf.f
+++ b/SRC/cpftrf.f
@@ -215,7 +215,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -255,14 +255,14 @@
*
CALL CPOTRF( 'L', N1, A( 0 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRSM( 'R', 'L', 'C', 'N', N2, N1, CONE, A( 0 ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL CHERK( 'U', 'N', N2, N1, -ONE, A( N1 ), N, ONE,
- + A( N ), N )
+ $ A( N ), N )
CALL CPOTRF( 'U', N2, A( N ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
ELSE
*
@@ -272,14 +272,14 @@
*
CALL CPOTRF( 'L', N1, A( N2 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRSM( 'L', 'L', 'N', 'N', N1, N2, CONE, A( N2 ), N,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL CHERK( 'U', 'C', N2, N1, -ONE, A( 0 ), N, ONE,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL CPOTRF( 'U', N2, A( N1 ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
END IF
*
@@ -295,14 +295,14 @@
*
CALL CPOTRF( 'U', N1, A( 0 ), N1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRSM( 'L', 'U', 'C', 'N', N1, N2, CONE, A( 0 ), N1,
- + A( N1*N1 ), N1 )
+ $ A( N1*N1 ), N1 )
CALL CHERK( 'L', 'C', N2, N1, -ONE, A( N1*N1 ), N1, ONE,
- + A( 1 ), N1 )
+ $ A( 1 ), N1 )
CALL CPOTRF( 'L', N2, A( 1 ), N1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
ELSE
*
@@ -312,14 +312,14 @@
*
CALL CPOTRF( 'U', N1, A( N2*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRSM( 'R', 'U', 'N', 'N', N2, N1, CONE, A( N2*N2 ),
- + N2, A( 0 ), N2 )
+ $ N2, A( 0 ), N2 )
CALL CHERK( 'L', 'N', N2, N1, -ONE, A( 0 ), N2, ONE,
- + A( N1*N2 ), N2 )
+ $ A( N1*N2 ), N2 )
CALL CPOTRF( 'L', N2, A( N1*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
END IF
*
@@ -341,14 +341,14 @@
*
CALL CPOTRF( 'L', K, A( 1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRSM( 'R', 'L', 'C', 'N', K, K, CONE, A( 1 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL CHERK( 'U', 'N', K, K, -ONE, A( K+1 ), N+1, ONE,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
CALL CPOTRF( 'U', K, A( 0 ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
ELSE
*
@@ -358,14 +358,14 @@
*
CALL CPOTRF( 'L', K, A( K+1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRSM( 'L', 'L', 'N', 'N', K, K, CONE, A( K+1 ),
- + N+1, A( 0 ), N+1 )
+ $ N+1, A( 0 ), N+1 )
CALL CHERK( 'U', 'C', K, K, -ONE, A( 0 ), N+1, ONE,
- + A( K ), N+1 )
+ $ A( K ), N+1 )
CALL CPOTRF( 'U', K, A( K ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
END IF
*
@@ -381,14 +381,14 @@
*
CALL CPOTRF( 'U', K, A( 0+K ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRSM( 'L', 'U', 'C', 'N', K, K, CONE, A( K ), N1,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL CHERK( 'L', 'C', K, K, -ONE, A( K*( K+1 ) ), K, ONE,
- + A( 0 ), K )
+ $ A( 0 ), K )
CALL CPOTRF( 'L', K, A( 0 ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
ELSE
*
@@ -398,14 +398,14 @@
*
CALL CPOTRF( 'U', K, A( K*( K+1 ) ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRSM( 'R', 'U', 'N', 'N', K, K, CONE,
- + A( K*( K+1 ) ), K, A( 0 ), K )
+ $ A( K*( K+1 ) ), K, A( 0 ), K )
CALL CHERK( 'L', 'N', K, K, -ONE, A( 0 ), K, ONE,
- + A( K*K ), K )
+ $ A( K*K ), K )
CALL CPOTRF( 'L', K, A( K*K ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
END IF
*
diff --git a/SRC/cpftri.f b/SRC/cpftri.f
index 0ebd150a..af349404 100644
--- a/SRC/cpftri.f
+++ b/SRC/cpftri.f
@@ -206,13 +206,13 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* Invert the triangular Cholesky factor U or L.
*
CALL CTFTRI( TRANSR, UPLO, 'N', N, A, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -253,9 +253,9 @@
*
CALL CLAUUM( 'L', N1, A( 0 ), N, INFO )
CALL CHERK( 'L', 'C', N1, N2, ONE, A( N1 ), N, ONE,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL CTRMM( 'L', 'U', 'N', 'N', N2, N1, CONE, A( N ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL CLAUUM( 'U', N2, A( N ), N, INFO )
*
ELSE
@@ -266,9 +266,9 @@
*
CALL CLAUUM( 'L', N1, A( N2 ), N, INFO )
CALL CHERK( 'L', 'N', N1, N2, ONE, A( 0 ), N, ONE,
- + A( N2 ), N )
+ $ A( N2 ), N )
CALL CTRMM( 'R', 'U', 'C', 'N', N1, N2, CONE, A( N1 ), N,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL CLAUUM( 'U', N2, A( N1 ), N, INFO )
*
END IF
@@ -284,9 +284,9 @@
*
CALL CLAUUM( 'U', N1, A( 0 ), N1, INFO )
CALL CHERK( 'U', 'N', N1, N2, ONE, A( N1*N1 ), N1, ONE,
- + A( 0 ), N1 )
+ $ A( 0 ), N1 )
CALL CTRMM( 'R', 'L', 'N', 'N', N1, N2, CONE, A( 1 ), N1,
- + A( N1*N1 ), N1 )
+ $ A( N1*N1 ), N1 )
CALL CLAUUM( 'L', N2, A( 1 ), N1, INFO )
*
ELSE
@@ -296,9 +296,9 @@
*
CALL CLAUUM( 'U', N1, A( N2*N2 ), N2, INFO )
CALL CHERK( 'U', 'C', N1, N2, ONE, A( 0 ), N2, ONE,
- + A( N2*N2 ), N2 )
+ $ A( N2*N2 ), N2 )
CALL CTRMM( 'L', 'L', 'C', 'N', N2, N1, CONE, A( N1*N2 ),
- + N2, A( 0 ), N2 )
+ $ N2, A( 0 ), N2 )
CALL CLAUUM( 'L', N2, A( N1*N2 ), N2, INFO )
*
END IF
@@ -321,9 +321,9 @@
*
CALL CLAUUM( 'L', K, A( 1 ), N+1, INFO )
CALL CHERK( 'L', 'C', K, K, ONE, A( K+1 ), N+1, ONE,
- + A( 1 ), N+1 )
+ $ A( 1 ), N+1 )
CALL CTRMM( 'L', 'U', 'N', 'N', K, K, CONE, A( 0 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL CLAUUM( 'U', K, A( 0 ), N+1, INFO )
*
ELSE
@@ -334,9 +334,9 @@
*
CALL CLAUUM( 'L', K, A( K+1 ), N+1, INFO )
CALL CHERK( 'L', 'N', K, K, ONE, A( 0 ), N+1, ONE,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL CTRMM( 'R', 'U', 'C', 'N', K, K, CONE, A( K ), N+1,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
CALL CLAUUM( 'U', K, A( K ), N+1, INFO )
*
END IF
@@ -353,9 +353,9 @@
*
CALL CLAUUM( 'U', K, A( K ), K, INFO )
CALL CHERK( 'U', 'N', K, K, ONE, A( K*( K+1 ) ), K, ONE,
- + A( K ), K )
+ $ A( K ), K )
CALL CTRMM( 'R', 'L', 'N', 'N', K, K, CONE, A( 0 ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL CLAUUM( 'L', K, A( 0 ), K, INFO )
*
ELSE
@@ -366,9 +366,9 @@
*
CALL CLAUUM( 'U', K, A( K*( K+1 ) ), K, INFO )
CALL CHERK( 'U', 'C', K, K, ONE, A( 0 ), K, ONE,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL CTRMM( 'L', 'L', 'C', 'N', K, K, CONE, A( K*K ), K,
- + A( 0 ), K )
+ $ A( 0 ), K )
CALL CLAUUM( 'L', K, A( K*K ), K, INFO )
*
END IF
diff --git a/SRC/cpftrs.f b/SRC/cpftrs.f
index d056cc79..97f4b1c0 100644
--- a/SRC/cpftrs.f
+++ b/SRC/cpftrs.f
@@ -207,20 +207,20 @@
* Quick return if possible
*
IF( N.EQ.0 .OR. NRHS.EQ.0 )
- + RETURN
+ $ RETURN
*
* start execution: there are two triangular solves
*
IF( LOWER ) THEN
CALL CTFSM( TRANSR, 'L', UPLO, 'N', 'N', N, NRHS, CONE, A, B,
- + LDB )
+ $ LDB )
CALL CTFSM( TRANSR, 'L', UPLO, 'C', 'N', N, NRHS, CONE, A, B,
- + LDB )
+ $ LDB )
ELSE
CALL CTFSM( TRANSR, 'L', UPLO, 'C', 'N', N, NRHS, CONE, A, B,
- + LDB )
+ $ LDB )
CALL CTFSM( TRANSR, 'L', UPLO, 'N', 'N', N, NRHS, CONE, A, B,
- + LDB )
+ $ LDB )
END IF
*
RETURN
diff --git a/SRC/ctfsm.f b/SRC/ctfsm.f
index 4cc1a0a2..9934b6c6 100644
--- a/SRC/ctfsm.f
+++ b/SRC/ctfsm.f
@@ -1,5 +1,5 @@
SUBROUTINE CTFSM( TRANSR, SIDE, UPLO, TRANS, DIAG, M, N, ALPHA, A,
- + B, LDB )
+ $ B, LDB )
*
* -- LAPACK routine (version 3.3.0) --
*
@@ -231,15 +231,16 @@
* -- -- -- -- -- -- -- -- --
* 04 14 24 34 44 43 44 22 32 42 52
*
+* =====================================================================
* ..
* .. Parameters ..
COMPLEX CONE, CZERO
PARAMETER ( CONE = ( 1.0E+0, 0.0E+0 ),
- + CZERO = ( 0.0E+0, 0.0E+0 ) )
+ $ CZERO = ( 0.0E+0, 0.0E+0 ) )
* ..
* .. Local Scalars ..
LOGICAL LOWER, LSIDE, MISODD, NISODD, NORMALTRANSR,
- + NOTRANS
+ $ NOTRANS
INTEGER M1, M2, N1, N2, K, INFO, I, J
* ..
* .. External Functions ..
@@ -270,7 +271,7 @@
ELSE IF( .NOT.NOTRANS .AND. .NOT.LSAME( TRANS, 'C' ) ) THEN
INFO = -4
ELSE IF( .NOT.LSAME( DIAG, 'N' ) .AND. .NOT.LSAME( DIAG, 'U' ) )
- + THEN
+ $ THEN
INFO = -5
ELSE IF( M.LT.0 ) THEN
INFO = -6
@@ -287,7 +288,7 @@
* Quick return when ( (N.EQ.0).OR.(M.EQ.0) )
*
IF( ( M.EQ.0 ) .OR. ( N.EQ.0 ) )
- + RETURN
+ $ RETURN
*
* Quick return when ALPHA.EQ.(0E+0,0E+0)
*
@@ -341,14 +342,14 @@
*
IF( M.EQ.1 ) THEN
CALL CTRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A, M, B, LDB )
+ $ A, M, B, LDB )
ELSE
CALL CTRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
CALL CGEMM( 'N', 'N', M2, N, M1, -CONE, A( M1 ),
- + M, B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ M, B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL CTRSM( 'L', 'U', 'C', DIAG, M2, N, CONE,
- + A( M ), M, B( M1, 0 ), LDB )
+ $ A( M ), M, B( M1, 0 ), LDB )
END IF
*
ELSE
@@ -358,14 +359,14 @@
*
IF( M.EQ.1 ) THEN
CALL CTRSM( 'L', 'L', 'C', DIAG, M1, N, ALPHA,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
ELSE
CALL CTRSM( 'L', 'U', 'N', DIAG, M2, N, ALPHA,
- + A( M ), M, B( M1, 0 ), LDB )
+ $ A( M ), M, B( M1, 0 ), LDB )
CALL CGEMM( 'C', 'N', M1, N, M2, -CONE, A( M1 ),
- + M, B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ M, B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL CTRSM( 'L', 'L', 'C', DIAG, M1, N, CONE,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
END IF
*
END IF
@@ -380,11 +381,11 @@
* TRANS = 'N'
*
CALL CTRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A( M2 ), M, B, LDB )
+ $ A( M2 ), M, B, LDB )
CALL CGEMM( 'C', 'N', M2, N, M1, -CONE, A( 0 ), M,
- + B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL CTRSM( 'L', 'U', 'C', DIAG, M2, N, CONE,
- + A( M1 ), M, B( M1, 0 ), LDB )
+ $ A( M1 ), M, B( M1, 0 ), LDB )
*
ELSE
*
@@ -392,11 +393,11 @@
* TRANS = 'C'
*
CALL CTRSM( 'L', 'U', 'N', DIAG, M2, N, ALPHA,
- + A( M1 ), M, B( M1, 0 ), LDB )
+ $ A( M1 ), M, B( M1, 0 ), LDB )
CALL CGEMM( 'N', 'N', M1, N, M2, -CONE, A( 0 ), M,
- + B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL CTRSM( 'L', 'L', 'C', DIAG, M1, N, CONE,
- + A( M2 ), M, B, LDB )
+ $ A( M2 ), M, B, LDB )
*
END IF
*
@@ -417,15 +418,15 @@
*
IF( M.EQ.1 ) THEN
CALL CTRSM( 'L', 'U', 'C', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
ELSE
CALL CTRSM( 'L', 'U', 'C', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
CALL CGEMM( 'C', 'N', M2, N, M1, -CONE,
- + A( M1*M1 ), M1, B, LDB, ALPHA,
- + B( M1, 0 ), LDB )
+ $ A( M1*M1 ), M1, B, LDB, ALPHA,
+ $ B( M1, 0 ), LDB )
CALL CTRSM( 'L', 'L', 'N', DIAG, M2, N, CONE,
- + A( 1 ), M1, B( M1, 0 ), LDB )
+ $ A( 1 ), M1, B( M1, 0 ), LDB )
END IF
*
ELSE
@@ -435,15 +436,15 @@
*
IF( M.EQ.1 ) THEN
CALL CTRSM( 'L', 'U', 'N', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
ELSE
CALL CTRSM( 'L', 'L', 'C', DIAG, M2, N, ALPHA,
- + A( 1 ), M1, B( M1, 0 ), LDB )
+ $ A( 1 ), M1, B( M1, 0 ), LDB )
CALL CGEMM( 'N', 'N', M1, N, M2, -CONE,
- + A( M1*M1 ), M1, B( M1, 0 ), LDB,
- + ALPHA, B, LDB )
+ $ A( M1*M1 ), M1, B( M1, 0 ), LDB,
+ $ ALPHA, B, LDB )
CALL CTRSM( 'L', 'U', 'N', DIAG, M1, N, CONE,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
END IF
*
END IF
@@ -458,11 +459,11 @@
* TRANS = 'N'
*
CALL CTRSM( 'L', 'U', 'C', DIAG, M1, N, ALPHA,
- + A( M2*M2 ), M2, B, LDB )
+ $ A( M2*M2 ), M2, B, LDB )
CALL CGEMM( 'N', 'N', M2, N, M1, -CONE, A( 0 ), M2,
- + B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL CTRSM( 'L', 'L', 'N', DIAG, M2, N, CONE,
- + A( M1*M2 ), M2, B( M1, 0 ), LDB )
+ $ A( M1*M2 ), M2, B( M1, 0 ), LDB )
*
ELSE
*
@@ -470,11 +471,11 @@
* TRANS = 'C'
*
CALL CTRSM( 'L', 'L', 'C', DIAG, M2, N, ALPHA,
- + A( M1*M2 ), M2, B( M1, 0 ), LDB )
+ $ A( M1*M2 ), M2, B( M1, 0 ), LDB )
CALL CGEMM( 'C', 'N', M1, N, M2, -CONE, A( 0 ), M2,
- + B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL CTRSM( 'L', 'U', 'N', DIAG, M1, N, CONE,
- + A( M2*M2 ), M2, B, LDB )
+ $ A( M2*M2 ), M2, B, LDB )
*
END IF
*
@@ -500,11 +501,11 @@
* and TRANS = 'N'
*
CALL CTRSM( 'L', 'L', 'N', DIAG, K, N, ALPHA,
- + A( 1 ), M+1, B, LDB )
+ $ A( 1 ), M+1, B, LDB )
CALL CGEMM( 'N', 'N', K, N, K, -CONE, A( K+1 ),
- + M+1, B, LDB, ALPHA, B( K, 0 ), LDB )
+ $ M+1, B, LDB, ALPHA, B( K, 0 ), LDB )
CALL CTRSM( 'L', 'U', 'C', DIAG, K, N, CONE,
- + A( 0 ), M+1, B( K, 0 ), LDB )
+ $ A( 0 ), M+1, B( K, 0 ), LDB )
*
ELSE
*
@@ -512,11 +513,11 @@
* and TRANS = 'C'
*
CALL CTRSM( 'L', 'U', 'N', DIAG, K, N, ALPHA,
- + A( 0 ), M+1, B( K, 0 ), LDB )
+ $ A( 0 ), M+1, B( K, 0 ), LDB )
CALL CGEMM( 'C', 'N', K, N, K, -CONE, A( K+1 ),
- + M+1, B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ M+1, B( K, 0 ), LDB, ALPHA, B, LDB )
CALL CTRSM( 'L', 'L', 'C', DIAG, K, N, CONE,
- + A( 1 ), M+1, B, LDB )
+ $ A( 1 ), M+1, B, LDB )
*
END IF
*
@@ -530,22 +531,22 @@
* and TRANS = 'N'
*
CALL CTRSM( 'L', 'L', 'N', DIAG, K, N, ALPHA,
- + A( K+1 ), M+1, B, LDB )
+ $ A( K+1 ), M+1, B, LDB )
CALL CGEMM( 'C', 'N', K, N, K, -CONE, A( 0 ), M+1,
- + B, LDB, ALPHA, B( K, 0 ), LDB )
+ $ B, LDB, ALPHA, B( K, 0 ), LDB )
CALL CTRSM( 'L', 'U', 'C', DIAG, K, N, CONE,
- + A( K ), M+1, B( K, 0 ), LDB )
+ $ A( K ), M+1, B( K, 0 ), LDB )
*
ELSE
*
* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U',
* and TRANS = 'C'
CALL CTRSM( 'L', 'U', 'N', DIAG, K, N, ALPHA,
- + A( K ), M+1, B( K, 0 ), LDB )
+ $ A( K ), M+1, B( K, 0 ), LDB )
CALL CGEMM( 'N', 'N', K, N, K, -CONE, A( 0 ), M+1,
- + B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ B( K, 0 ), LDB, ALPHA, B, LDB )
CALL CTRSM( 'L', 'L', 'C', DIAG, K, N, CONE,
- + A( K+1 ), M+1, B, LDB )
+ $ A( K+1 ), M+1, B, LDB )
*
END IF
*
@@ -565,12 +566,12 @@
* and TRANS = 'N'
*
CALL CTRSM( 'L', 'U', 'C', DIAG, K, N, ALPHA,
- + A( K ), K, B, LDB )
+ $ A( K ), K, B, LDB )
CALL CGEMM( 'C', 'N', K, N, K, -CONE,
- + A( K*( K+1 ) ), K, B, LDB, ALPHA,
- + B( K, 0 ), LDB )
+ $ A( K*( K+1 ) ), K, B, LDB, ALPHA,
+ $ B( K, 0 ), LDB )
CALL CTRSM( 'L', 'L', 'N', DIAG, K, N, CONE,
- + A( 0 ), K, B( K, 0 ), LDB )
+ $ A( 0 ), K, B( K, 0 ), LDB )
*
ELSE
*
@@ -578,12 +579,12 @@
* and TRANS = 'C'
*
CALL CTRSM( 'L', 'L', 'C', DIAG, K, N, ALPHA,
- + A( 0 ), K, B( K, 0 ), LDB )
+ $ A( 0 ), K, B( K, 0 ), LDB )
CALL CGEMM( 'N', 'N', K, N, K, -CONE,
- + A( K*( K+1 ) ), K, B( K, 0 ), LDB,
- + ALPHA, B, LDB )
+ $ A( K*( K+1 ) ), K, B( K, 0 ), LDB,
+ $ ALPHA, B, LDB )
CALL CTRSM( 'L', 'U', 'N', DIAG, K, N, CONE,
- + A( K ), K, B, LDB )
+ $ A( K ), K, B, LDB )
*
END IF
*
@@ -597,11 +598,11 @@
* and TRANS = 'N'
*
CALL CTRSM( 'L', 'U', 'C', DIAG, K, N, ALPHA,
- + A( K*( K+1 ) ), K, B, LDB )
+ $ A( K*( K+1 ) ), K, B, LDB )
CALL CGEMM( 'N', 'N', K, N, K, -CONE, A( 0 ), K, B,
- + LDB, ALPHA, B( K, 0 ), LDB )
+ $ LDB, ALPHA, B( K, 0 ), LDB )
CALL CTRSM( 'L', 'L', 'N', DIAG, K, N, CONE,
- + A( K*K ), K, B( K, 0 ), LDB )
+ $ A( K*K ), K, B( K, 0 ), LDB )
*
ELSE
*
@@ -609,11 +610,11 @@
* and TRANS = 'C'
*
CALL CTRSM( 'L', 'L', 'C', DIAG, K, N, ALPHA,
- + A( K*K ), K, B( K, 0 ), LDB )
+ $ A( K*K ), K, B( K, 0 ), LDB )
CALL CGEMM( 'C', 'N', K, N, K, -CONE, A( 0 ), K,
- + B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ B( K, 0 ), LDB, ALPHA, B, LDB )
CALL CTRSM( 'L', 'U', 'N', DIAG, K, N, CONE,
- + A( K*( K+1 ) ), K, B, LDB )
+ $ A( K*( K+1 ) ), K, B, LDB )
*
END IF
*
@@ -663,12 +664,12 @@
* TRANS = 'N'
*
CALL CTRSM( 'R', 'U', 'C', DIAG, M, N2, ALPHA,
- + A( N ), N, B( 0, N1 ), LDB )
+ $ A( N ), N, B( 0, N1 ), LDB )
CALL CGEMM( 'N', 'N', M, N1, N2, -CONE, B( 0, N1 ),
- + LDB, A( N1 ), N, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( N1 ), N, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL CTRSM( 'R', 'L', 'N', DIAG, M, N1, CONE,
- + A( 0 ), N, B( 0, 0 ), LDB )
+ $ A( 0 ), N, B( 0, 0 ), LDB )
*
ELSE
*
@@ -676,12 +677,12 @@
* TRANS = 'C'
*
CALL CTRSM( 'R', 'L', 'C', DIAG, M, N1, ALPHA,
- + A( 0 ), N, B( 0, 0 ), LDB )
+ $ A( 0 ), N, B( 0, 0 ), LDB )
CALL CGEMM( 'N', 'C', M, N2, N1, -CONE, B( 0, 0 ),
- + LDB, A( N1 ), N, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( N1 ), N, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL CTRSM( 'R', 'U', 'N', DIAG, M, N2, CONE,
- + A( N ), N, B( 0, N1 ), LDB )
+ $ A( N ), N, B( 0, N1 ), LDB )
*
END IF
*
@@ -695,12 +696,12 @@
* TRANS = 'N'
*
CALL CTRSM( 'R', 'L', 'C', DIAG, M, N1, ALPHA,
- + A( N2 ), N, B( 0, 0 ), LDB )
+ $ A( N2 ), N, B( 0, 0 ), LDB )
CALL CGEMM( 'N', 'N', M, N2, N1, -CONE, B( 0, 0 ),
- + LDB, A( 0 ), N, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( 0 ), N, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL CTRSM( 'R', 'U', 'N', DIAG, M, N2, CONE,
- + A( N1 ), N, B( 0, N1 ), LDB )
+ $ A( N1 ), N, B( 0, N1 ), LDB )
*
ELSE
*
@@ -708,11 +709,11 @@
* TRANS = 'C'
*
CALL CTRSM( 'R', 'U', 'C', DIAG, M, N2, ALPHA,
- + A( N1 ), N, B( 0, N1 ), LDB )
+ $ A( N1 ), N, B( 0, N1 ), LDB )
CALL CGEMM( 'N', 'C', M, N1, N2, -CONE, B( 0, N1 ),
- + LDB, A( 0 ), N, ALPHA, B( 0, 0 ), LDB )
+ $ LDB, A( 0 ), N, ALPHA, B( 0, 0 ), LDB )
CALL CTRSM( 'R', 'L', 'N', DIAG, M, N1, CONE,
- + A( N2 ), N, B( 0, 0 ), LDB )
+ $ A( N2 ), N, B( 0, 0 ), LDB )
*
END IF
*
@@ -732,12 +733,12 @@
* TRANS = 'N'
*
CALL CTRSM( 'R', 'L', 'N', DIAG, M, N2, ALPHA,
- + A( 1 ), N1, B( 0, N1 ), LDB )
+ $ A( 1 ), N1, B( 0, N1 ), LDB )
CALL CGEMM( 'N', 'C', M, N1, N2, -CONE, B( 0, N1 ),
- + LDB, A( N1*N1 ), N1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( N1*N1 ), N1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL CTRSM( 'R', 'U', 'C', DIAG, M, N1, CONE,
- + A( 0 ), N1, B( 0, 0 ), LDB )
+ $ A( 0 ), N1, B( 0, 0 ), LDB )
*
ELSE
*
@@ -745,12 +746,12 @@
* TRANS = 'C'
*
CALL CTRSM( 'R', 'U', 'N', DIAG, M, N1, ALPHA,
- + A( 0 ), N1, B( 0, 0 ), LDB )
+ $ A( 0 ), N1, B( 0, 0 ), LDB )
CALL CGEMM( 'N', 'N', M, N2, N1, -CONE, B( 0, 0 ),
- + LDB, A( N1*N1 ), N1, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( N1*N1 ), N1, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL CTRSM( 'R', 'L', 'C', DIAG, M, N2, CONE,
- + A( 1 ), N1, B( 0, N1 ), LDB )
+ $ A( 1 ), N1, B( 0, N1 ), LDB )
*
END IF
*
@@ -764,12 +765,12 @@
* TRANS = 'N'
*
CALL CTRSM( 'R', 'U', 'N', DIAG, M, N1, ALPHA,
- + A( N2*N2 ), N2, B( 0, 0 ), LDB )
+ $ A( N2*N2 ), N2, B( 0, 0 ), LDB )
CALL CGEMM( 'N', 'C', M, N2, N1, -CONE, B( 0, 0 ),
- + LDB, A( 0 ), N2, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( 0 ), N2, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL CTRSM( 'R', 'L', 'C', DIAG, M, N2, CONE,
- + A( N1*N2 ), N2, B( 0, N1 ), LDB )
+ $ A( N1*N2 ), N2, B( 0, N1 ), LDB )
*
ELSE
*
@@ -777,12 +778,12 @@
* TRANS = 'C'
*
CALL CTRSM( 'R', 'L', 'N', DIAG, M, N2, ALPHA,
- + A( N1*N2 ), N2, B( 0, N1 ), LDB )
+ $ A( N1*N2 ), N2, B( 0, N1 ), LDB )
CALL CGEMM( 'N', 'N', M, N1, N2, -CONE, B( 0, N1 ),
- + LDB, A( 0 ), N2, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( 0 ), N2, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL CTRSM( 'R', 'U', 'C', DIAG, M, N1, CONE,
- + A( N2*N2 ), N2, B( 0, 0 ), LDB )
+ $ A( N2*N2 ), N2, B( 0, 0 ), LDB )
*
END IF
*
@@ -808,12 +809,12 @@
* and TRANS = 'N'
*
CALL CTRSM( 'R', 'U', 'C', DIAG, M, K, ALPHA,
- + A( 0 ), N+1, B( 0, K ), LDB )
+ $ A( 0 ), N+1, B( 0, K ), LDB )
CALL CGEMM( 'N', 'N', M, K, K, -CONE, B( 0, K ),
- + LDB, A( K+1 ), N+1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( K+1 ), N+1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL CTRSM( 'R', 'L', 'N', DIAG, M, K, CONE,
- + A( 1 ), N+1, B( 0, 0 ), LDB )
+ $ A( 1 ), N+1, B( 0, 0 ), LDB )
*
ELSE
*
@@ -821,12 +822,12 @@
* and TRANS = 'C'
*
CALL CTRSM( 'R', 'L', 'C', DIAG, M, K, ALPHA,
- + A( 1 ), N+1, B( 0, 0 ), LDB )
+ $ A( 1 ), N+1, B( 0, 0 ), LDB )
CALL CGEMM( 'N', 'C', M, K, K, -CONE, B( 0, 0 ),
- + LDB, A( K+1 ), N+1, ALPHA, B( 0, K ),
- + LDB )
+ $ LDB, A( K+1 ), N+1, ALPHA, B( 0, K ),
+ $ LDB )
CALL CTRSM( 'R', 'U', 'N', DIAG, M, K, CONE,
- + A( 0 ), N+1, B( 0, K ), LDB )
+ $ A( 0 ), N+1, B( 0, K ), LDB )
*
END IF
*
@@ -840,12 +841,12 @@
* and TRANS = 'N'
*
CALL CTRSM( 'R', 'L', 'C', DIAG, M, K, ALPHA,
- + A( K+1 ), N+1, B( 0, 0 ), LDB )
+ $ A( K+1 ), N+1, B( 0, 0 ), LDB )
CALL CGEMM( 'N', 'N', M, K, K, -CONE, B( 0, 0 ),
- + LDB, A( 0 ), N+1, ALPHA, B( 0, K ),
- + LDB )
+ $ LDB, A( 0 ), N+1, ALPHA, B( 0, K ),
+ $ LDB )
CALL CTRSM( 'R', 'U', 'N', DIAG, M, K, CONE,
- + A( K ), N+1, B( 0, K ), LDB )
+ $ A( K ), N+1, B( 0, K ), LDB )
*
ELSE
*
@@ -853,12 +854,12 @@
* and TRANS = 'C'
*
CALL CTRSM( 'R', 'U', 'C', DIAG, M, K, ALPHA,
- + A( K ), N+1, B( 0, K ), LDB )
+ $ A( K ), N+1, B( 0, K ), LDB )
CALL CGEMM( 'N', 'C', M, K, K, -CONE, B( 0, K ),
- + LDB, A( 0 ), N+1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( 0 ), N+1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL CTRSM( 'R', 'L', 'N', DIAG, M, K, CONE,
- + A( K+1 ), N+1, B( 0, 0 ), LDB )
+ $ A( K+1 ), N+1, B( 0, 0 ), LDB )
*
END IF
*
@@ -878,12 +879,12 @@
* and TRANS = 'N'
*
CALL CTRSM( 'R', 'L', 'N', DIAG, M, K, ALPHA,
- + A( 0 ), K, B( 0, K ), LDB )
+ $ A( 0 ), K, B( 0, K ), LDB )
CALL CGEMM( 'N', 'C', M, K, K, -CONE, B( 0, K ),
- + LDB, A( ( K+1 )*K ), K, ALPHA,
- + B( 0, 0 ), LDB )
+ $ LDB, A( ( K+1 )*K ), K, ALPHA,
+ $ B( 0, 0 ), LDB )
CALL CTRSM( 'R', 'U', 'C', DIAG, M, K, CONE,
- + A( K ), K, B( 0, 0 ), LDB )
+ $ A( K ), K, B( 0, 0 ), LDB )
*
ELSE
*
@@ -891,12 +892,12 @@
* and TRANS = 'C'
*
CALL CTRSM( 'R', 'U', 'N', DIAG, M, K, ALPHA,
- + A( K ), K, B( 0, 0 ), LDB )
+ $ A( K ), K, B( 0, 0 ), LDB )
CALL CGEMM( 'N', 'N', M, K, K, -CONE, B( 0, 0 ),
- + LDB, A( ( K+1 )*K ), K, ALPHA,
- + B( 0, K ), LDB )
+ $ LDB, A( ( K+1 )*K ), K, ALPHA,
+ $ B( 0, K ), LDB )
CALL CTRSM( 'R', 'L', 'C', DIAG, M, K, CONE,
- + A( 0 ), K, B( 0, K ), LDB )
+ $ A( 0 ), K, B( 0, K ), LDB )
*
END IF
*
@@ -910,11 +911,11 @@
* and TRANS = 'N'
*
CALL CTRSM( 'R', 'U', 'N', DIAG, M, K, ALPHA,
- + A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
+ $ A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
CALL CGEMM( 'N', 'C', M, K, K, -CONE, B( 0, 0 ),
- + LDB, A( 0 ), K, ALPHA, B( 0, K ), LDB )
+ $ LDB, A( 0 ), K, ALPHA, B( 0, K ), LDB )
CALL CTRSM( 'R', 'L', 'C', DIAG, M, K, CONE,
- + A( K*K ), K, B( 0, K ), LDB )
+ $ A( K*K ), K, B( 0, K ), LDB )
*
ELSE
*
@@ -922,11 +923,11 @@
* and TRANS = 'C'
*
CALL CTRSM( 'R', 'L', 'N', DIAG, M, K, ALPHA,
- + A( K*K ), K, B( 0, K ), LDB )
+ $ A( K*K ), K, B( 0, K ), LDB )
CALL CGEMM( 'N', 'N', M, K, K, -CONE, B( 0, K ),
- + LDB, A( 0 ), K, ALPHA, B( 0, 0 ), LDB )
+ $ LDB, A( 0 ), K, ALPHA, B( 0, 0 ), LDB )
CALL CTRSM( 'R', 'U', 'C', DIAG, M, K, CONE,
- + A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
+ $ A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
*
END IF
*
diff --git a/SRC/ctftri.f b/SRC/ctftri.f
index 0afa056b..a5978124 100644
--- a/SRC/ctftri.f
+++ b/SRC/ctftri.f
@@ -201,7 +201,7 @@
ELSE IF( .NOT.LOWER .AND. .NOT.LSAME( UPLO, 'U' ) ) THEN
INFO = -2
ELSE IF( .NOT.LSAME( DIAG, 'N' ) .AND. .NOT.LSAME( DIAG, 'U' ) )
- + THEN
+ $ THEN
INFO = -3
ELSE IF( N.LT.0 ) THEN
INFO = -4
@@ -214,7 +214,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -255,16 +255,16 @@
*
CALL CTRTRI( 'L', DIAG, N1, A( 0 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'R', 'L', 'N', DIAG, N2, N1, -CONE, A( 0 ),
- + N, A( N1 ), N )
+ $ N, A( N1 ), N )
CALL CTRTRI( 'U', DIAG, N2, A( N ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'L', 'U', 'C', DIAG, N2, N1, CONE, A( N ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
*
ELSE
*
@@ -274,16 +274,16 @@
*
CALL CTRTRI( 'L', DIAG, N1, A( N2 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'L', 'L', 'C', DIAG, N1, N2, -CONE, A( N2 ),
- + N, A( 0 ), N )
+ $ N, A( 0 ), N )
CALL CTRTRI( 'U', DIAG, N2, A( N1 ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'R', 'U', 'N', DIAG, N1, N2, CONE, A( N1 ),
- + N, A( 0 ), N )
+ $ N, A( 0 ), N )
*
END IF
*
@@ -298,16 +298,16 @@
*
CALL CTRTRI( 'U', DIAG, N1, A( 0 ), N1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'L', 'U', 'N', DIAG, N1, N2, -CONE, A( 0 ),
- + N1, A( N1*N1 ), N1 )
+ $ N1, A( N1*N1 ), N1 )
CALL CTRTRI( 'L', DIAG, N2, A( 1 ), N1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'R', 'L', 'C', DIAG, N1, N2, CONE, A( 1 ),
- + N1, A( N1*N1 ), N1 )
+ $ N1, A( N1*N1 ), N1 )
*
ELSE
*
@@ -316,16 +316,16 @@
*
CALL CTRTRI( 'U', DIAG, N1, A( N2*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'R', 'U', 'C', DIAG, N2, N1, -CONE,
- + A( N2*N2 ), N2, A( 0 ), N2 )
+ $ A( N2*N2 ), N2, A( 0 ), N2 )
CALL CTRTRI( 'L', DIAG, N2, A( N1*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'L', 'L', 'N', DIAG, N2, N1, CONE,
- + A( N1*N2 ), N2, A( 0 ), N2 )
+ $ A( N1*N2 ), N2, A( 0 ), N2 )
END IF
*
END IF
@@ -346,16 +346,16 @@
*
CALL CTRTRI( 'L', DIAG, K, A( 1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'R', 'L', 'N', DIAG, K, K, -CONE, A( 1 ),
- + N+1, A( K+1 ), N+1 )
+ $ N+1, A( K+1 ), N+1 )
CALL CTRTRI( 'U', DIAG, K, A( 0 ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'L', 'U', 'C', DIAG, K, K, CONE, A( 0 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
*
ELSE
*
@@ -365,16 +365,16 @@
*
CALL CTRTRI( 'L', DIAG, K, A( K+1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'L', 'L', 'C', DIAG, K, K, -CONE, A( K+1 ),
- + N+1, A( 0 ), N+1 )
+ $ N+1, A( 0 ), N+1 )
CALL CTRTRI( 'U', DIAG, K, A( K ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'R', 'U', 'N', DIAG, K, K, CONE, A( K ), N+1,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
END IF
ELSE
*
@@ -388,16 +388,16 @@
*
CALL CTRTRI( 'U', DIAG, K, A( K ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'L', 'U', 'N', DIAG, K, K, -CONE, A( K ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL CTRTRI( 'L', DIAG, K, A( 0 ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'R', 'L', 'C', DIAG, K, K, CONE, A( 0 ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
ELSE
*
* SRPA for UPPER, TRANSPOSE and N is even (see paper)
@@ -406,16 +406,16 @@
*
CALL CTRTRI( 'U', DIAG, K, A( K*( K+1 ) ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'R', 'U', 'C', DIAG, K, K, -CONE,
- + A( K*( K+1 ) ), K, A( 0 ), K )
+ $ A( K*( K+1 ) ), K, A( 0 ), K )
CALL CTRTRI( 'L', DIAG, K, A( K*K ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL CTRMM( 'L', 'L', 'N', DIAG, K, K, CONE, A( K*K ), K,
- + A( 0 ), K )
+ $ A( 0 ), K )
END IF
END IF
END IF
diff --git a/SRC/ctfttp.f b/SRC/ctfttp.f
index 9dc90933..034c6f1e 100644
--- a/SRC/ctfttp.f
+++ b/SRC/ctfttp.f
@@ -201,7 +201,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
IF( N.EQ.1 ) THEN
IF( NORMALTRANSR ) THEN
@@ -244,7 +244,7 @@
* ARF^C has lda rows and n+1-noe cols
*
IF( .NOT.NORMALTRANSR )
- + LDA = ( N+1 ) / 2
+ $ LDA = ( N+1 ) / 2
*
* start execution: there are eight cases
*
diff --git a/SRC/ctfttr.f b/SRC/ctfttr.f
index 841e9f27..133fe4bf 100644
--- a/SRC/ctfttr.f
+++ b/SRC/ctfttr.f
@@ -236,11 +236,11 @@
K = N / 2
NISODD = .FALSE.
IF( .NOT.LOWER )
- + NP1X2 = N + N + 2
+ $ NP1X2 = N + N + 2
ELSE
NISODD = .TRUE.
IF( .NOT.LOWER )
- + NX2 = N + N
+ $ NX2 = N + N
END IF
*
IF( NISODD ) THEN
diff --git a/SRC/ctpttf.f b/SRC/ctpttf.f
index b9ebeba7..2c6ec570 100644
--- a/SRC/ctpttf.f
+++ b/SRC/ctpttf.f
@@ -198,7 +198,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
IF( N.EQ.1 ) THEN
IF( NORMALTRANSR ) THEN
@@ -241,7 +241,7 @@
* ARF^C has lda rows and n+1-noe cols
*
IF( .NOT.NORMALTRANSR )
- + LDA = ( N+1 ) / 2
+ $ LDA = ( N+1 ) / 2
*
* start execution: there are eight cases
*
diff --git a/SRC/ctrttf.f b/SRC/ctrttf.f
index a14be7ab..4835ca4a 100644
--- a/SRC/ctrttf.f
+++ b/SRC/ctrttf.f
@@ -235,11 +235,11 @@
K = N / 2
NISODD = .FALSE.
IF( .NOT.LOWER )
- + NP1X2 = N + N + 2
+ $ NP1X2 = N + N + 2
ELSE
NISODD = .TRUE.
IF( .NOT.LOWER )
- + NX2 = N + N
+ $ NX2 = N + N
END IF
*
IF( NISODD ) THEN
diff --git a/SRC/dgejsv.f b/SRC/dgejsv.f
index 043dcc61..e61b27a9 100644
--- a/SRC/dgejsv.f
+++ b/SRC/dgejsv.f
@@ -1,6 +1,6 @@
SUBROUTINE DGEJSV( JOBA, JOBU, JOBV, JOBR, JOBT, JOBP,
- & M, N, A, LDA, SVA, U, LDU, V, LDV,
- & WORK, LWORK, IWORK, INFO )
+ $ M, N, A, LDA, SVA, U, LDU, V, LDV,
+ $ WORK, LWORK, IWORK, INFO )
*
* -- LAPACK routine (version 3.3.0) --
*
@@ -22,7 +22,7 @@
* ..
* .. Array Arguments ..
DOUBLE PRECISION A( LDA, * ), SVA( N ), U( LDU, * ), V( LDV, * ),
- & WORK( LWORK )
+ $ WORK( LWORK )
INTEGER IWORK( * )
CHARACTER*1 JOBA, JOBP, JOBR, JOBT, JOBU, JOBV
* ..
@@ -360,11 +360,11 @@
* Department of Mathematics, University of Zagreb, 2008.
*
* Bugs, examples and comments
-*
+*
* Please report all bugs and send interesting examples and/or comments to
* drmac@math.hr. Thank you.
*
-* ==========================================================================
+* ===========================================================================
*
* .. Local Parameters ..
DOUBLE PRECISION ZERO, ONE
@@ -372,16 +372,16 @@
* ..
* .. Local Scalars ..
DOUBLE PRECISION AAPP, AAQQ, AATMAX, AATMIN, BIG, BIG1, COND_OK,
- & CONDR1, CONDR2, ENTRA, ENTRAT, EPSLN, MAXPRJ, SCALEM,
- & SCONDA, SFMIN, SMALL, TEMP1, USCAL1, USCAL2, XSC
+ $ CONDR1, CONDR2, ENTRA, ENTRAT, EPSLN, MAXPRJ, SCALEM,
+ $ SCONDA, SFMIN, SMALL, TEMP1, USCAL1, USCAL2, XSC
INTEGER IERR, N1, NR, NUMRANK, p, q, WARNING
LOGICAL ALMORT, DEFR, ERREST, GOSCAL, JRACC, KILL, LSVEC,
- & L2ABER, L2KILL, L2PERT, L2RANK, L2TRAN,
- & NOSCAL, ROWPIV, RSVEC, TRANSP
+ $ L2ABER, L2KILL, L2PERT, L2RANK, L2TRAN,
+ $ NOSCAL, ROWPIV, RSVEC, TRANSP
* ..
* .. Intrinsic Functions ..
INTRINSIC DABS, DLOG, DMAX1, DMIN1, DBLE,
- & MAX0, MIN0, IDNINT, DSIGN, DSQRT
+ $ MAX0, MIN0, IDNINT, DSIGN, DSQRT
* ..
* .. External Functions ..
DOUBLE PRECISION DLAMCH, DNRM2
@@ -391,8 +391,8 @@
* ..
* .. External Subroutines ..
EXTERNAL DCOPY, DGELQF, DGEQP3, DGEQRF, DLACPY, DLASCL,
- & DLASET, DLASSQ, DLASWP, DORGQR, DORMLQ,
- & DORMQR, DPOCON, DSCAL, DSWAP, DTRSM, XERBLA
+ $ DLASET, DLASSQ, DLASWP, DORGQR, DORMLQ,
+ $ DORMQR, DPOCON, DSCAL, DSWAP, DTRSM, XERBLA
*
EXTERNAL DGESVJ
* ..
@@ -412,13 +412,13 @@
L2PERT = LSAME( JOBP, 'P' )
*
IF ( .NOT.(ROWPIV .OR. L2RANK .OR. L2ABER .OR.
- & ERREST .OR. LSAME( JOBA, 'C' ) )) THEN
+ $ ERREST .OR. LSAME( JOBA, 'C' ) )) THEN
INFO = - 1
ELSE IF ( .NOT.( LSVEC .OR. LSAME( JOBU, 'N' ) .OR.
- & LSAME( JOBU, 'W' )) ) THEN
+ $ LSAME( JOBU, 'W' )) ) THEN
INFO = - 2
ELSE IF ( .NOT.( RSVEC .OR. LSAME( JOBV, 'N' ) .OR.
- & LSAME( JOBV, 'W' )) .OR. ( JRACC .AND. (.NOT.LSVEC) ) ) THEN
+ $ LSAME( JOBV, 'W' )) .OR. ( JRACC .AND. (.NOT.LSVEC) ) ) THEN
INFO = - 3
ELSE IF ( .NOT. ( L2KILL .OR. DEFR ) ) THEN
INFO = - 4
@@ -437,14 +437,14 @@
ELSE IF ( RSVEC .AND. ( LDV .LT. N ) ) THEN
INFO = - 14
ELSE IF ( (.NOT.(LSVEC .OR. RSVEC .OR. ERREST).AND.
- & (LWORK .LT. MAX0(7,4*N+1,2*M+N))) .OR.
- & (.NOT.(LSVEC .OR. LSVEC) .AND. ERREST .AND.
- & (LWORK .LT. MAX0(7,4*N+N*N,2*M+N))) .OR.
- & (LSVEC .AND. (.NOT.RSVEC) .AND. (LWORK .LT. MAX0(7,2*N+M))) .OR.
- & (RSVEC .AND. (.NOT.LSVEC) .AND. (LWORK .LT. MAX0(7,2*N+M))) .OR.
- & (LSVEC .AND. RSVEC .AND. .NOT.JRACC .AND. (LWORK.LT.6*N+2*N*N))
- & .OR. (LSVEC.AND.RSVEC.AND.JRACC.AND.LWORK.LT.MAX0(7,M+3*N+N*N)))
- & THEN
+ $ (LWORK .LT. MAX0(7,4*N+1,2*M+N))) .OR.
+ $ (.NOT.(LSVEC .OR. LSVEC) .AND. ERREST .AND.
+ $ (LWORK .LT. MAX0(7,4*N+N*N,2*M+N))) .OR.
+ $ (LSVEC .AND. (.NOT.RSVEC) .AND. (LWORK .LT. MAX0(7,2*N+M))) .OR.
+ $ (RSVEC .AND. (.NOT.LSVEC) .AND. (LWORK .LT. MAX0(7,2*N+M))) .OR.
+ $ (LSVEC .AND. RSVEC .AND. .NOT.JRACC .AND. (LWORK.LT.6*N+2*N*N))
+ $ .OR. (LSVEC.AND.RSVEC.AND.JRACC.AND.LWORK.LT.MAX0(7,M+3*N+N*N)))
+ $ THEN
INFO = - 17
ELSE
* #:)
@@ -834,8 +834,8 @@
TEMP1 = DSQRT(SFMIN)
DO 3401 p = 2, N
IF ( ( DABS(A(p,p)) .LT. (EPSLN*DABS(A(p-1,p-1))) ) .OR.
- & ( DABS(A(p,p)) .LT. SMALL ) .OR.
- & ( L2KILL .AND. (DABS(A(p,p)) .LT. TEMP1) ) ) GO TO 3402
+ $ ( DABS(A(p,p)) .LT. SMALL ) .OR.
+ $ ( L2KILL .AND. (DABS(A(p,p)) .LT. TEMP1) ) ) GO TO 3402
NR = NR + 1
3401 CONTINUE
3402 CONTINUE
@@ -851,7 +851,7 @@
TEMP1 = DSQRT(SFMIN)
DO 3301 p = 2, N
IF ( ( DABS(A(p,p)) .LT. SMALL ) .OR.
- & ( L2KILL .AND. (DABS(A(p,p)) .LT. TEMP1) ) ) GO TO 3302
+ $ ( L2KILL .AND. (DABS(A(p,p)) .LT. TEMP1) ) ) GO TO 3302
NR = NR + 1
3301 CONTINUE
3302 CONTINUE
@@ -883,7 +883,7 @@
CALL DSCAL( p, ONE/TEMP1, V(1,p), 1 )
3053 CONTINUE
CALL DPOCON( 'U', N, V, LDV, ONE, TEMP1,
- & WORK(N+1), IWORK(2*N+M+1), IERR )
+ $ WORK(N+1), IWORK(2*N+M+1), IERR )
ELSE IF ( LSVEC ) THEN
* .. U is available as workspace
CALL DLACPY( 'U', N, N, A, LDA, U, LDU )
@@ -892,7 +892,7 @@
CALL DSCAL( p, ONE/TEMP1, U(1,p), 1 )
3054 CONTINUE
CALL DPOCON( 'U', N, U, LDU, ONE, TEMP1,
- & WORK(N+1), IWORK(2*N+M+1), IERR )
+ $ WORK(N+1), IWORK(2*N+M+1), IERR )
ELSE
CALL DLACPY( 'U', N, N, A, LDA, WORK(N+1), N )
DO 3052 p = 1, N
@@ -901,7 +901,7 @@
3052 CONTINUE
* .. the columns of R are scaled to have unit Euclidean lengths.
CALL DPOCON( 'U', N, WORK(N+1), N, ONE, TEMP1,
- & WORK(N+N*N+1), IWORK(2*N+M+1), IERR )
+ $ WORK(N+N*N+1), IWORK(2*N+M+1), IERR )
END IF
SCONDA = ONE / DSQRT(TEMP1)
* SCONDA is an estimate of DSQRT(||(R^t * R)^(-1)||_1).
@@ -947,8 +947,8 @@
TEMP1 = XSC*DABS(A(q,q))
DO 4949 p = 1, N
IF ( ( (p.GT.q) .AND. (DABS(A(p,q)).LE.TEMP1) )
- & .OR. ( p .LT. q ) )
- & A(p,q) = DSIGN( TEMP1, A(p,q) )
+ $ .OR. ( p .LT. q ) )
+ $ A(p,q) = DSIGN( TEMP1, A(p,q) )
4949 CONTINUE
4947 CONTINUE
ELSE
@@ -977,8 +977,8 @@
TEMP1 = XSC*DABS(A(q,q))
DO 1949 p = 1, NR
IF ( ( (p.GT.q) .AND. (DABS(A(p,q)).LE.TEMP1) )
- & .OR. ( p .LT. q ) )
- & A(p,q) = DSIGN( TEMP1, A(p,q) )
+ $ .OR. ( p .LT. q ) )
+ $ A(p,q) = DSIGN( TEMP1, A(p,q) )
1949 CONTINUE
1947 CONTINUE
ELSE
@@ -990,7 +990,7 @@
* the part which destroys triangular form (confusing?!))
*
CALL DGESVJ( 'L', 'NoU', 'NoV', NR, NR, A, LDA, SVA,
- & N, V, LDV, WORK, LWORK, INFO )
+ $ N, V, LDV, WORK, LWORK, INFO )
*
SCALEM = WORK(1)
NUMRANK = IDNINT(WORK(2))
@@ -1009,7 +1009,7 @@
CALL DLASET( 'Upper', NR-1, NR-1, ZERO, ZERO, V(1,2), LDV )
*
CALL DGESVJ( 'L','U','N', N, NR, V,LDV, SVA, NR, A,LDA,
- & WORK, LWORK, INFO )
+ $ WORK, LWORK, INFO )
SCALEM = WORK(1)
NUMRANK = IDNINT(WORK(2))
@@ -1023,14 +1023,14 @@
CALL DLACPY( 'Lower', NR, NR, A, LDA, V, LDV )
CALL DLASET( 'Upper', NR-1, NR-1, ZERO, ZERO, V(1,2), LDV )
CALL DGEQRF( NR, NR, V, LDV, WORK(N+1), WORK(2*N+1),
- & LWORK-2*N, IERR )
+ $ LWORK-2*N, IERR )
DO 8998 p = 1, NR
CALL DCOPY( NR-p+1, V(p,p), LDV, V(p,p), 1 )
8998 CONTINUE
CALL DLASET( 'Upper', NR-1, NR-1, ZERO, ZERO, V(1,2), LDV )
*
CALL DGESVJ( 'Lower', 'U','N', NR, NR, V,LDV, SVA, NR, U,
- & LDU, WORK(N+1), LWORK, INFO )
+ $ LDU, WORK(N+1), LWORK, INFO )
SCALEM = WORK(N+1)
NUMRANK = IDNINT(WORK(N+2))
IF ( NR .LT. N ) THEN
@@ -1040,7 +1040,7 @@
END IF
*
CALL DORMLQ( 'Left', 'Transpose', N, N, NR, A, LDA, WORK,
- & V, LDV, WORK(N+1), LWORK-N, IERR )
+ $ V, LDV, WORK(N+1), LWORK-N, IERR )
*
END IF
*
@@ -1065,7 +1065,7 @@
CALL DLASET( 'Upper', NR-1, NR-1, ZERO, ZERO, U(1,2), LDU )
*
CALL DGEQRF( N, NR, U, LDU, WORK(N+1), WORK(2*N+1),
- & LWORK-2*N, IERR )
+ $ LWORK-2*N, IERR )
*
DO 1967 p = 1, NR - 1
CALL DCOPY( NR-p, U(p,p+1), LDU, U(p+1,p), 1 )
@@ -1073,7 +1073,7 @@
CALL DLASET( 'Upper', NR-1, NR-1, ZERO, ZERO, U(1,2), LDU )
*
CALL DGESVJ( 'Lower', 'U', 'N', NR,NR, U, LDU, SVA, NR, A,
- & LDA, WORK(N+1), LWORK-N, INFO )
+ $ LDA, WORK(N+1), LWORK-N, INFO )
SCALEM = WORK(N+1)
NUMRANK = IDNINT(WORK(N+2))
*
@@ -1086,10 +1086,10 @@
END IF
*
CALL DORMQR( 'Left', 'No Tr', M, N1, N, A, LDA, WORK, U,
- & LDU, WORK(N+1), LWORK-N, IERR )
+ $ LDU, WORK(N+1), LWORK-N, IERR )
*
IF ( ROWPIV )
- & CALL DLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
+ $ CALL DLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
*
DO 1974 p = 1, N1
XSC = ONE / DNRM2( M, U(1,p), 1 )
@@ -1137,9 +1137,9 @@
TEMP1 = XSC*DABS( V(q,q) )
DO 2968 p = 1, N
IF ( ( p .GT. q ) .AND. ( DABS(V(p,q)) .LE. TEMP1 )
- & .OR. ( p .LT. q ) )
- & V(p,q) = DSIGN( TEMP1, V(p,q) )
- IF ( p. LT. q ) V(p,q) = - V(p,q)
+ $ .OR. ( p .LT. q ) )
+ $ V(p,q) = DSIGN( TEMP1, V(p,q) )
+ IF ( p .LT. q ) V(p,q) = - V(p,q)
2968 CONTINUE
2969 CONTINUE
ELSE
@@ -1156,7 +1156,7 @@
CALL DSCAL(NR-p+1,ONE/TEMP1,WORK(2*N+(p-1)*NR+p),1)
3950 CONTINUE
CALL DPOCON('Lower',NR,WORK(2*N+1),NR,ONE,TEMP1,
- & WORK(2*N+NR*NR+1),IWORK(M+2*N+1),IERR)
+ $ WORK(2*N+NR*NR+1),IWORK(M+2*N+1),IERR)
CONDR1 = ONE / DSQRT(TEMP1)
* .. here need a second oppinion on the condition number
* .. then assume worst case scenario
@@ -1172,7 +1172,7 @@
* of a lower triangular matrix.
* R1^t = Q2 * R2
CALL DGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1),
- & LWORK-2*N, IERR )
+ $ LWORK-2*N, IERR )
*
IF ( L2PERT ) THEN
XSC = DSQRT(SMALL)/EPSLN
@@ -1180,14 +1180,14 @@
DO 3958 q = 1, p - 1
TEMP1 = XSC * DMIN1(DABS(V(p,p)),DABS(V(q,q)))
IF ( DABS(V(q,p)) .LE. TEMP1 )
- & V(q,p) = DSIGN( TEMP1, V(q,p) )
+ $ V(q,p) = DSIGN( TEMP1, V(q,p) )
3958 CONTINUE
3959 CONTINUE
END IF
*
IF ( NR .NE. N )
+ $ CALL DLACPY( 'A', N, NR, V, LDV, WORK(2*N+1), N )
* .. save ...
- & CALL DLACPY( 'A', N, NR, V, LDV, WORK(2*N+1), N )
*
* .. this transposed copy should be better than naive
DO 1969 p = 1, NR - 1
@@ -1210,16 +1210,16 @@
IWORK(N+p) = 0
3003 CONTINUE
CALL DGEQP3( N, NR, V, LDV, IWORK(N+1), WORK(N+1),
- & WORK(2*N+1), LWORK-2*N, IERR )
+ $ WORK(2*N+1), LWORK-2*N, IERR )
** CALL DGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1),
-** & LWORK-2*N, IERR )
+** $ LWORK-2*N, IERR )
IF ( L2PERT ) THEN
XSC = DSQRT(SMALL)
DO 3969 p = 2, NR
DO 3968 q = 1, p - 1
TEMP1 = XSC * DMIN1(DABS(V(p,p)),DABS(V(q,q)))
IF ( DABS(V(q,p)) .LE. TEMP1 )
- & V(q,p) = DSIGN( TEMP1, V(q,p) )
+ $ V(q,p) = DSIGN( TEMP1, V(q,p) )
3968 CONTINUE
3969 CONTINUE
END IF
@@ -1239,7 +1239,7 @@
END IF
* Now, compute R2 = L3 * Q3, the LQ factorization.
CALL DGELQF( NR, NR, V, LDV, WORK(2*N+N*NR+1),
- & WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, IERR )
+ $ WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, IERR )
* .. and estimate the condition number
CALL DLACPY( 'L',NR,NR,V,LDV,WORK(2*N+N*NR+NR+1),NR )
DO 4950 p = 1, NR
@@ -1247,7 +1247,7 @@
CALL DSCAL( p, ONE/TEMP1, WORK(2*N+N*NR+NR+p), NR )
4950 CONTINUE
CALL DPOCON( 'L',NR,WORK(2*N+N*NR+NR+1),NR,ONE,TEMP1,
- & WORK(2*N+N*NR+NR+NR*NR+1),IWORK(M+2*N+1),IERR )
+ $ WORK(2*N+N*NR+NR+NR*NR+1),IWORK(M+2*N+1),IERR )
CONDR2 = ONE / DSQRT(TEMP1)
*
IF ( CONDR2 .GE. COND_OK ) THEN
@@ -1284,7 +1284,7 @@
IF ( CONDR1 .LT. COND_OK ) THEN
*
CALL DGESVJ( 'L','U','N',NR,NR,V,LDV,SVA,NR,U,
- & LDU,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,INFO )
+ $ LDU,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,INFO )
SCALEM = WORK(2*N+N*NR+NR+1)
NUMRANK = IDNINT(WORK(2*N+N*NR+NR+2))
DO 3970 p = 1, NR
@@ -1294,7 +1294,7 @@
* .. pick the right matrix equation and solve it
*
- IF ( NR. EQ. N ) THEN
+ IF ( NR .EQ. N ) THEN
* :)) .. best case, R1 is inverted. The solution of this matrix
* equation is Q2*V2 = the product of the Jacobi rotations
* used in DGESVJ, premultiplied with the orthogonal matrix
@@ -1306,14 +1306,14 @@
* used in DGESVJ. The Q-factor from the second QR
* factorization is then built in explicitly.
CALL DTRSM('L','U','T','N',NR,NR,ONE,WORK(2*N+1),
- & N,V,LDV)
+ $ N,V,LDV)
IF ( NR .LT. N ) THEN
CALL DLASET('A',N-NR,NR,ZERO,ZERO,V(NR+1,1),LDV)
CALL DLASET('A',NR,N-NR,ZERO,ZERO,V(1,NR+1),LDV)
CALL DLASET('A',N-NR,N-NR,ZERO,ONE,V(NR+1,NR+1),LDV)
END IF
CALL DORMQR('L','N',N,N,NR,WORK(2*N+1),N,WORK(N+1),
- & V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR)
+ $ V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR)
END IF
*
ELSE IF ( CONDR2 .LT. COND_OK ) THEN
@@ -1325,7 +1325,7 @@
* the lower triangular L3 from the LQ factorization of
* R2=L3*Q3), pre-multiplied with the transposed Q3.
CALL DGESVJ( 'L', 'U', 'N', NR, NR, V, LDV, SVA, NR, U,
- & LDU, WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, INFO )
+ $ LDU, WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, INFO )
SCALEM = WORK(2*N+N*NR+NR+1)
NUMRANK = IDNINT(WORK(2*N+N*NR+NR+2))
DO 3870 p = 1, NR
@@ -1348,7 +1348,7 @@
CALL DLASET( 'A',N-NR,N-NR,ZERO,ONE,V(NR+1,NR+1),LDV )
END IF
CALL DORMQR( 'L','N',N,N,NR,WORK(2*N+1),N,WORK(N+1),
- & V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
+ $ V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
ELSE
* Last line of defense.
* #:( This is a rather pathological case: no scaled condition
@@ -1362,7 +1362,7 @@
* Compute the full SVD of L3 using DGESVJ with explicit
* accumulation of Jacobi rotations.
CALL DGESVJ( 'L', 'U', 'V', NR, NR, V, LDV, SVA, NR, U,
- & LDU, WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, INFO )
+ $ LDU, WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, INFO )
SCALEM = WORK(2*N+N*NR+NR+1)
NUMRANK = IDNINT(WORK(2*N+N*NR+NR+2))
IF ( NR .LT. N ) THEN
@@ -1371,11 +1371,11 @@
CALL DLASET( 'A',N-NR,N-NR,ZERO,ONE,V(NR+1,NR+1),LDV )
END IF
CALL DORMQR( 'L','N',N,N,NR,WORK(2*N+1),N,WORK(N+1),
- & V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
+ $ V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
*
CALL DORMLQ( 'L', 'T', NR, NR, NR, WORK(2*N+1), N,
- & WORK(2*N+N*NR+1), U, LDU, WORK(2*N+N*NR+NR+1),
- & LWORK-2*N-N*NR-NR, IERR )
+ $ WORK(2*N+N*NR+1), U, LDU, WORK(2*N+N*NR+NR+1),
+ $ LWORK-2*N-N*NR-NR, IERR )
DO 773 q = 1, NR
DO 772 p = 1, NR
WORK(2*N+N*NR+NR+IWORK(N+p)) = U(p,q)
@@ -1401,7 +1401,7 @@
973 CONTINUE
XSC = ONE / DNRM2( N, V(1,q), 1 )
IF ( (XSC .LT. (ONE-TEMP1)) .OR. (XSC .GT. (ONE+TEMP1)) )
- & CALL DSCAL( N, XSC, V(1,q), 1 )
+ $ CALL DSCAL( N, XSC, V(1,q), 1 )
1972 CONTINUE
* At this moment, V contains the right singular vectors of A.
* Next, assemble the left singular vector matrix U (M x N).
@@ -1417,21 +1417,21 @@
* matrix U. This applies to all cases.
*
CALL DORMQR( 'Left', 'No_Tr', M, N1, N, A, LDA, WORK, U,
- & LDU, WORK(N+1), LWORK-N, IERR )
+ $ LDU, WORK(N+1), LWORK-N, IERR )
* The columns of U are normalized. The cost is O(M*N) flops.
TEMP1 = DSQRT(DBLE(M)) * EPSLN
DO 1973 p = 1, NR
XSC = ONE / DNRM2( M, U(1,p), 1 )
IF ( (XSC .LT. (ONE-TEMP1)) .OR. (XSC .GT. (ONE+TEMP1)) )
- & CALL DSCAL( M, XSC, U(1,p), 1 )
+ $ CALL DSCAL( M, XSC, U(1,p), 1 )
1973 CONTINUE
*
* If the initial QRF is computed with row pivoting, the left
* singular vectors must be adjusted.
*
IF ( ROWPIV )
- & CALL DLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
+ $ CALL DLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
*
ELSE
*
@@ -1452,7 +1452,7 @@
END IF
*
CALL DGESVJ( 'Upper', 'U', 'N', N, N, WORK(N+1), N, SVA,
- & N, U, LDU, WORK(N+N*N+1), LWORK-N-N*N, INFO )
+ $ N, U, LDU, WORK(N+N*N+1), LWORK-N-N*N, INFO )
*
SCALEM = WORK(N+N*N+1)
NUMRANK = IDNINT(WORK(N+N*N+2))
@@ -1462,7 +1462,7 @@
6970 CONTINUE
*
CALL DTRSM( 'Left', 'Upper', 'NoTrans', 'No UD', N, N,
- & ONE, A, LDA, WORK(N+1), N )
+ $ ONE, A, LDA, WORK(N+1), N )
DO 6972 p = 1, N
CALL DCOPY( N, WORK(N+p), N, V(IWORK(p),1), LDV )
6972 CONTINUE
@@ -1470,7 +1470,7 @@
DO 6971 p = 1, N
XSC = ONE / DNRM2( N, V(1,p), 1 )
IF ( (XSC .LT. (ONE-TEMP1)) .OR. (XSC .GT. (ONE+TEMP1)) )
- & CALL DSCAL( N, XSC, V(1,p), 1 )
+ $ CALL DSCAL( N, XSC, V(1,p), 1 )
6971 CONTINUE
*
* Assemble the left singular vector matrix U (M x N).
@@ -1483,16 +1483,16 @@
END IF
END IF
CALL DORMQR( 'Left', 'No Tr', M, N1, N, A, LDA, WORK, U,
- & LDU, WORK(N+1), LWORK-N, IERR )
+ $ LDU, WORK(N+1), LWORK-N, IERR )
TEMP1 = DSQRT(DBLE(M))*EPSLN
DO 6973 p = 1, N1
XSC = ONE / DNRM2( M, U(1,p), 1 )
IF ( (XSC .LT. (ONE-TEMP1)) .OR. (XSC .GT. (ONE+TEMP1)) )
- & CALL DSCAL( M, XSC, U(1,p), 1 )
+ $ CALL DSCAL( M, XSC, U(1,p), 1 )
6973 CONTINUE
*
IF ( ROWPIV )
- & CALL DLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
+ $ CALL DLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
*
END IF
*
@@ -1520,9 +1520,9 @@
TEMP1 = XSC*DABS( V(q,q) )
DO 5968 p = 1, N
IF ( ( p .GT. q ) .AND. ( DABS(V(p,q)) .LE. TEMP1 )
- & .OR. ( p .LT. q ) )
- & V(p,q) = DSIGN( TEMP1, V(p,q) )
- IF ( p. LT. q ) V(p,q) = - V(p,q)
+ $ .OR. ( p .LT. q ) )
+ $ V(p,q) = DSIGN( TEMP1, V(p,q) )
+ IF ( p .LT. q ) V(p,q) = - V(p,q)
5968 CONTINUE
5969 CONTINUE
ELSE
@@ -1530,7 +1530,7 @@
END IF
CALL DGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1),
- & LWORK-2*N, IERR )
+ $ LWORK-2*N, IERR )
CALL DLACPY( 'L', N, NR, V, LDV, WORK(2*N+1), N )
*
DO 7969 p = 1, NR
@@ -1550,7 +1550,7 @@
END IF
CALL DGESVJ( 'G', 'U', 'V', NR, NR, U, LDU, SVA,
- & N, V, LDV, WORK(2*N+N*NR+1), LWORK-2*N-N*NR, INFO )
+ $ N, V, LDV, WORK(2*N+N*NR+1), LWORK-2*N-N*NR, INFO )
SCALEM = WORK(2*N+N*NR+1)
NUMRANK = IDNINT(WORK(2*N+N*NR+2))
@@ -1561,7 +1561,7 @@
END IF
CALL DORMQR( 'L','N',N,N,NR,WORK(2*N+1),N,WORK(N+1),
- & V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
+ $ V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
*
* Permute the rows of V using the (column) permutation from the
* first QRF. Also, scale the columns to make them unit in
@@ -1577,7 +1577,7 @@
8973 CONTINUE
XSC = ONE / DNRM2( N, V(1,q), 1 )
IF ( (XSC .LT. (ONE-TEMP1)) .OR. (XSC .GT. (ONE+TEMP1)) )
- & CALL DSCAL( N, XSC, V(1,q), 1 )
+ $ CALL DSCAL( N, XSC, V(1,q), 1 )
7972 CONTINUE
*
* At this moment, V contains the right singular vectors of A.
@@ -1592,10 +1592,10 @@
END IF
*
CALL DORMQR( 'Left', 'No Tr', M, N1, N, A, LDA, WORK, U,
- & LDU, WORK(N+1), LWORK-N, IERR )
+ $ LDU, WORK(N+1), LWORK-N, IERR )
*
IF ( ROWPIV )
- & CALL DLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
+ $ CALL DLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
*
*
END IF
diff --git a/SRC/dgesvj.f b/SRC/dgesvj.f
index a8e154fc..f687183a 100644
--- a/SRC/dgesvj.f
+++ b/SRC/dgesvj.f
@@ -1,5 +1,5 @@
SUBROUTINE DGESVJ( JOBA, JOBU, JOBV, M, N, A, LDA, SVA, MV, V,
- + LDV, WORK, LWORK, INFO )
+ $ LDV, WORK, LWORK, INFO )
*
* -- LAPACK routine (version 3.3.1) --
*
@@ -23,7 +23,7 @@
* ..
* .. Array Arguments ..
DOUBLE PRECISION A( LDA, * ), SVA( N ), V( LDV, * ),
- + WORK( LWORK )
+ $ WORK( LWORK )
* ..
*
* Purpose
@@ -256,22 +256,22 @@
* .. Local Parameters ..
DOUBLE PRECISION ZERO, HALF, ONE, TWO
PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0, ONE = 1.0D0,
- + TWO = 2.0D0 )
+ $ TWO = 2.0D0 )
INTEGER NSWEEP
PARAMETER ( NSWEEP = 30 )
* ..
* .. Local Scalars ..
DOUBLE PRECISION AAPP, AAPP0, AAPQ, AAQQ, APOAQ, AQOAP, BIG,
- + BIGTHETA, CS, CTOL, EPSLN, LARGE, MXAAPQ,
- + MXSINJ, ROOTBIG, ROOTEPS, ROOTSFMIN, ROOTTOL,
- + SKL, SFMIN, SMALL, SN, T, TEMP1, THETA,
- + THSIGN, TOL
+ $ BIGTHETA, CS, CTOL, EPSLN, LARGE, MXAAPQ,
+ $ MXSINJ, ROOTBIG, ROOTEPS, ROOTSFMIN, ROOTTOL,
+ $ SKL, SFMIN, SMALL, SN, T, TEMP1, THETA,
+ $ THSIGN, TOL
INTEGER BLSKIP, EMPTSW, i, ibr, IERR, igl, IJBLSK, ir1,
- + ISWROT, jbc, jgl, KBL, LKAHEAD, MVL, N2, N34,
- + N4, NBL, NOTROT, p, PSKIPPED, q, ROWSKIP,
- + SWBAND
+ $ ISWROT, jbc, jgl, KBL, LKAHEAD, MVL, N2, N34,
+ $ N4, NBL, NOTROT, p, PSKIPPED, q, ROWSKIP,
+ $ SWBAND
LOGICAL APPLV, GOSCALE, LOWER, LSVEC, NOSCALE, ROTOK,
- + RSVEC, UCTOL, UPPER
+ $ RSVEC, UCTOL, UPPER
* ..
* .. Local Arrays ..
DOUBLE PRECISION FASTR( 5 )
@@ -327,7 +327,7 @@
ELSE IF( MV.LT.0 ) THEN
INFO = -9
ELSE IF( ( RSVEC .AND. ( LDV.LT.N ) ) .OR.
- + ( APPLV .AND. ( LDV.LT.MV ) ) ) THEN
+ $ ( APPLV .AND. ( LDV.LT.MV ) ) ) THEN
INFO = -11
ELSE IF( UCTOL .AND. ( WORK( 1 ).LE.ONE ) ) THEN
INFO = -12
@@ -518,7 +518,7 @@
*
IF( N.EQ.1 ) THEN
IF( LSVEC )CALL DLASCL( 'G', 0, 0, SVA( 1 ), SKL, M, 1,
- + A( 1, 1 ), LDA, IERR )
+ $ A( 1, 1 ), LDA, IERR )
WORK( 1 ) = ONE / SKL
IF( SVA( 1 ).GE.SFMIN ) THEN
WORK( 2 ) = ONE
@@ -538,7 +538,7 @@
SN = DSQRT( SFMIN / EPSLN )
TEMP1 = DSQRT( BIG / DBLE( N ) )
IF( ( AAPP.LE.SN ) .OR. ( AAQQ.GE.TEMP1 ) .OR.
- + ( ( SN.LE.AAQQ ) .AND. ( AAPP.LE.TEMP1 ) ) ) THEN
+ $ ( ( SN.LE.AAQQ ) .AND. ( AAPP.LE.TEMP1 ) ) ) THEN
TEMP1 = DMIN1( BIG, TEMP1 / AAPP )
* AAQQ = AAQQ*TEMP1
* AAPP = AAPP*TEMP1
@@ -638,54 +638,54 @@
* [+ + x x] [x x]. [x x]
*
CALL DGSVJ0( JOBV, M-N34, N-N34, A( N34+1, N34+1 ), LDA,
- + WORK( N34+1 ), SVA( N34+1 ), MVL,
- + V( N34*q+1, N34+1 ), LDV, EPSLN, SFMIN, TOL,
- + 2, WORK( N+1 ), LWORK-N, IERR )
+ $ WORK( N34+1 ), SVA( N34+1 ), MVL,
+ $ V( N34*q+1, N34+1 ), LDV, EPSLN, SFMIN, TOL,
+ $ 2, WORK( N+1 ), LWORK-N, IERR )
*
CALL DGSVJ0( JOBV, M-N2, N34-N2, A( N2+1, N2+1 ), LDA,
- + WORK( N2+1 ), SVA( N2+1 ), MVL,
- + V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 2,
- + WORK( N+1 ), LWORK-N, IERR )
+ $ WORK( N2+1 ), SVA( N2+1 ), MVL,
+ $ V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 2,
+ $ WORK( N+1 ), LWORK-N, IERR )
*
CALL DGSVJ1( JOBV, M-N2, N-N2, N4, A( N2+1, N2+1 ), LDA,
- + WORK( N2+1 ), SVA( N2+1 ), MVL,
- + V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 1,
- + WORK( N+1 ), LWORK-N, IERR )
+ $ WORK( N2+1 ), SVA( N2+1 ), MVL,
+ $ V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 1,
+ $ WORK( N+1 ), LWORK-N, IERR )
*
CALL DGSVJ0( JOBV, M-N4, N2-N4, A( N4+1, N4+1 ), LDA,
- + WORK( N4+1 ), SVA( N4+1 ), MVL,
- + V( N4*q+1, N4+1 ), LDV, EPSLN, SFMIN, TOL, 1,
- + WORK( N+1 ), LWORK-N, IERR )
+ $ WORK( N4+1 ), SVA( N4+1 ), MVL,
+ $ V( N4*q+1, N4+1 ), LDV, EPSLN, SFMIN, TOL, 1,
+ $ WORK( N+1 ), LWORK-N, IERR )
*
CALL DGSVJ0( JOBV, M, N4, A, LDA, WORK, SVA, MVL, V, LDV,
- + EPSLN, SFMIN, TOL, 1, WORK( N+1 ), LWORK-N,
- + IERR )
+ $ EPSLN, SFMIN, TOL, 1, WORK( N+1 ), LWORK-N,
+ $ IERR )
*
CALL DGSVJ1( JOBV, M, N2, N4, A, LDA, WORK, SVA, MVL, V,
- + LDV, EPSLN, SFMIN, TOL, 1, WORK( N+1 ),
- + LWORK-N, IERR )
+ $ LDV, EPSLN, SFMIN, TOL, 1, WORK( N+1 ),
+ $ LWORK-N, IERR )
*
*
ELSE IF( UPPER ) THEN
*
*
CALL DGSVJ0( JOBV, N4, N4, A, LDA, WORK, SVA, MVL, V, LDV,
- + EPSLN, SFMIN, TOL, 2, WORK( N+1 ), LWORK-N,
- + IERR )
+ $ EPSLN, SFMIN, TOL, 2, WORK( N+1 ), LWORK-N,
+ $ IERR )
*
CALL DGSVJ0( JOBV, N2, N4, A( 1, N4+1 ), LDA, WORK( N4+1 ),
- + SVA( N4+1 ), MVL, V( N4*q+1, N4+1 ), LDV,
- + EPSLN, SFMIN, TOL, 1, WORK( N+1 ), LWORK-N,
- + IERR )
+ $ SVA( N4+1 ), MVL, V( N4*q+1, N4+1 ), LDV,
+ $ EPSLN, SFMIN, TOL, 1, WORK( N+1 ), LWORK-N,
+ $ IERR )
*
CALL DGSVJ1( JOBV, N2, N2, N4, A, LDA, WORK, SVA, MVL, V,
- + LDV, EPSLN, SFMIN, TOL, 1, WORK( N+1 ),
- + LWORK-N, IERR )
+ $ LDV, EPSLN, SFMIN, TOL, 1, WORK( N+1 ),
+ $ LWORK-N, IERR )
*
CALL DGSVJ0( JOBV, N2+N4, N4, A( 1, N2+1 ), LDA,
- + WORK( N2+1 ), SVA( N2+1 ), MVL,
- + V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 1,
- + WORK( N+1 ), LWORK-N, IERR )
+ $ WORK( N2+1 ), SVA( N2+1 ), MVL,
+ $ V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 1,
+ $ WORK( N+1 ), LWORK-N, IERR )
END IF
*
@@ -725,7 +725,7 @@
IF( p.NE.q ) THEN
CALL DSWAP( M, A( 1, p ), 1, A( 1, q ), 1 )
IF( RSVEC )CALL DSWAP( MVL, V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, q ), 1 )
TEMP1 = SVA( p )
SVA( p ) = SVA( q )
SVA( q ) = TEMP1
@@ -749,7 +749,7 @@
* below should read "AAPP = DNRM2( M, A(1,p), 1 ) * WORK(p)".
*
IF( ( SVA( p ).LT.ROOTBIG ) .AND.
- + ( SVA( p ).GT.ROOTSFMIN ) ) THEN
+ $ ( SVA( p ).GT.ROOTSFMIN ) ) THEN
SVA( p ) = DNRM2( M, A( 1, p ), 1 )*WORK( p )
ELSE
TEMP1 = ZERO
@@ -777,31 +777,31 @@
ROTOK = ( SMALL*AAPP ).LE.AAQQ
IF( AAPP.LT.( BIG / AAQQ ) ) THEN
AAPQ = ( DDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*WORK( p )*WORK( q ) /
- + AAQQ ) / AAPP
+ $ q ), 1 )*WORK( p )*WORK( q ) /
+ $ AAQQ ) / AAPP
ELSE
CALL DCOPY( M, A( 1, p ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL DLASCL( 'G', 0, 0, AAPP,
- + WORK( p ), M, 1,
- + WORK( N+1 ), LDA, IERR )
+ $ WORK( p ), M, 1,
+ $ WORK( N+1 ), LDA, IERR )
AAPQ = DDOT( M, WORK( N+1 ), 1,
- + A( 1, q ), 1 )*WORK( q ) / AAQQ
+ $ A( 1, q ), 1 )*WORK( q ) / AAQQ
END IF
ELSE
ROTOK = AAPP.LE.( AAQQ / SMALL )
IF( AAPP.GT.( SMALL / AAQQ ) ) THEN
AAPQ = ( DDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*WORK( p )*WORK( q ) /
- + AAQQ ) / AAPP
+ $ q ), 1 )*WORK( p )*WORK( q ) /
+ $ AAQQ ) / AAPP
ELSE
CALL DCOPY( M, A( 1, q ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL DLASCL( 'G', 0, 0, AAQQ,
- + WORK( q ), M, 1,
- + WORK( N+1 ), LDA, IERR )
+ $ WORK( q ), M, 1,
+ $ WORK( N+1 ), LDA, IERR )
AAPQ = DDOT( M, WORK( N+1 ), 1,
- + A( 1, p ), 1 )*WORK( p ) / AAPP
+ $ A( 1, p ), 1 )*WORK( p ) / AAPP
END IF
END IF
*
@@ -831,17 +831,17 @@
T = HALF / THETA
FASTR( 3 ) = T*WORK( p ) / WORK( q )
FASTR( 4 ) = -T*WORK( q ) /
- + WORK( p )
+ $ WORK( p )
CALL DROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1, FASTR )
+ $ A( 1, q ), 1, FASTR )
IF( RSVEC )CALL DROTM( MVL,
- + V( 1, p ), 1,
- + V( 1, q ), 1,
- + FASTR )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1,
+ $ FASTR )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, DABS( T ) )
*
ELSE
@@ -850,15 +850,15 @@
*
THSIGN = -DSIGN( ONE, AAPQ )
T = ONE / ( THETA+THSIGN*
- + DSQRT( ONE+THETA*THETA ) )
+ $ DSQRT( ONE+THETA*THETA ) )
CS = DSQRT( ONE / ( ONE+T*T ) )
SN = T*CS
*
MXSINJ = DMAX1( MXSINJ, DABS( SN ) )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
*
APOAQ = WORK( p ) / WORK( q )
AQOAP = WORK( q ) / WORK( p )
@@ -869,88 +869,88 @@
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q )*CS
CALL DROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1,
- + FASTR )
+ $ A( 1, q ), 1,
+ $ FASTR )
IF( RSVEC )CALL DROTM( MVL,
- + V( 1, p ), 1, V( 1, q ),
- + 1, FASTR )
+ $ V( 1, p ), 1, V( 1, q ),
+ $ 1, FASTR )
ELSE
CALL DAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL DAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q ) / CS
IF( RSVEC ) THEN
CALL DAXPY( MVL, -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL DAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
END IF
ELSE
IF( WORK( q ).GE.ONE ) THEN
CALL DAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL DAXPY( M, -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
WORK( p ) = WORK( p ) / CS
WORK( q ) = WORK( q )*CS
IF( RSVEC ) THEN
CALL DAXPY( MVL, T*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
CALL DAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
ELSE
IF( WORK( p ).GE.WORK( q ) )
- + THEN
+ $ THEN
CALL DAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL DAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q ) / CS
IF( RSVEC ) THEN
CALL DAXPY( MVL,
- + -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -T*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL DAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
ELSE
CALL DAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL DAXPY( M,
- + -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
WORK( p ) = WORK( p ) / CS
WORK( q ) = WORK( q )*CS
IF( RSVEC ) THEN
CALL DAXPY( MVL,
- + T*APOAQ, V( 1, p ),
- + 1, V( 1, q ), 1 )
+ $ T*APOAQ, V( 1, p ),
+ $ 1, V( 1, q ), 1 )
CALL DAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
END IF
END IF
@@ -960,19 +960,19 @@
ELSE
* .. have to use modified Gram-Schmidt like transformation
CALL DCOPY( M, A( 1, p ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL DLASCL( 'G', 0, 0, AAPP, ONE, M,
- + 1, WORK( N+1 ), LDA,
- + IERR )
+ $ 1, WORK( N+1 ), LDA,
+ $ IERR )
CALL DLASCL( 'G', 0, 0, AAQQ, ONE, M,
- + 1, A( 1, q ), LDA, IERR )
+ $ 1, A( 1, q ), LDA, IERR )
TEMP1 = -AAPQ*WORK( p ) / WORK( q )
CALL DAXPY( M, TEMP1, WORK( N+1 ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, q ), 1 )
CALL DLASCL( 'G', 0, 0, ONE, AAQQ, M,
- + 1, A( 1, q ), LDA, IERR )
+ $ 1, A( 1, q ), LDA, IERR )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, SFMIN )
END IF
* END IF ROTOK THEN ... ELSE
@@ -981,29 +981,29 @@
* recompute SVA(q), SVA(p).
*
IF( ( SVA( q ) / AAQQ )**2.LE.ROOTEPS )
- + THEN
+ $ THEN
IF( ( AAQQ.LT.ROOTBIG ) .AND.
- + ( AAQQ.GT.ROOTSFMIN ) ) THEN
+ $ ( AAQQ.GT.ROOTSFMIN ) ) THEN
SVA( q ) = DNRM2( M, A( 1, q ), 1 )*
- + WORK( q )
+ $ WORK( q )
ELSE
T = ZERO
AAQQ = ONE
CALL DLASSQ( M, A( 1, q ), 1, T,
- + AAQQ )
+ $ AAQQ )
SVA( q ) = T*DSQRT( AAQQ )*WORK( q )
END IF
END IF
IF( ( AAPP / AAPP0 ).LE.ROOTEPS ) THEN
IF( ( AAPP.LT.ROOTBIG ) .AND.
- + ( AAPP.GT.ROOTSFMIN ) ) THEN
+ $ ( AAPP.GT.ROOTSFMIN ) ) THEN
AAPP = DNRM2( M, A( 1, p ), 1 )*
- + WORK( p )
+ $ WORK( p )
ELSE
T = ZERO
AAPP = ONE
CALL DLASSQ( M, A( 1, p ), 1, T,
- + AAPP )
+ $ AAPP )
AAPP = T*DSQRT( AAPP )*WORK( p )
END IF
SVA( p ) = AAPP
@@ -1022,7 +1022,7 @@
END IF
*
IF( ( i.LE.SWBAND ) .AND.
- + ( PSKIPPED.GT.ROWSKIP ) ) THEN
+ $ ( PSKIPPED.GT.ROWSKIP ) ) THEN
IF( ir1.EQ.0 )AAPP = -AAPP
NOTROT = 0
GO TO 2103
@@ -1039,7 +1039,7 @@
ELSE
SVA( p ) = AAPP
IF( ( ir1.EQ.0 ) .AND. ( AAPP.EQ.ZERO ) )
- + NOTROT = NOTROT + MIN0( igl+KBL-1, N ) - p
+ $ NOTROT = NOTROT + MIN0( igl+KBL-1, N ) - p
END IF
*
2001 CONTINUE
@@ -1084,16 +1084,16 @@
END IF
IF( AAPP.LT.( BIG / AAQQ ) ) THEN
AAPQ = ( DDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*WORK( p )*WORK( q ) /
- + AAQQ ) / AAPP
+ $ q ), 1 )*WORK( p )*WORK( q ) /
+ $ AAQQ ) / AAPP
ELSE
CALL DCOPY( M, A( 1, p ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL DLASCL( 'G', 0, 0, AAPP,
- + WORK( p ), M, 1,
- + WORK( N+1 ), LDA, IERR )
+ $ WORK( p ), M, 1,
+ $ WORK( N+1 ), LDA, IERR )
AAPQ = DDOT( M, WORK( N+1 ), 1,
- + A( 1, q ), 1 )*WORK( q ) / AAQQ
+ $ A( 1, q ), 1 )*WORK( q ) / AAQQ
END IF
ELSE
IF( AAPP.GE.AAQQ ) THEN
@@ -1103,16 +1103,16 @@
END IF
IF( AAPP.GT.( SMALL / AAQQ ) ) THEN
AAPQ = ( DDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*WORK( p )*WORK( q ) /
- + AAQQ ) / AAPP
+ $ q ), 1 )*WORK( p )*WORK( q ) /
+ $ AAQQ ) / AAPP
ELSE
CALL DCOPY( M, A( 1, q ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL DLASCL( 'G', 0, 0, AAQQ,
- + WORK( q ), M, 1,
- + WORK( N+1 ), LDA, IERR )
+ $ WORK( q ), M, 1,
+ $ WORK( N+1 ), LDA, IERR )
AAPQ = DDOT( M, WORK( N+1 ), 1,
- + A( 1, p ), 1 )*WORK( p ) / AAPP
+ $ A( 1, p ), 1 )*WORK( p ) / AAPP
END IF
END IF
*
@@ -1137,17 +1137,17 @@
T = HALF / THETA
FASTR( 3 ) = T*WORK( p ) / WORK( q )
FASTR( 4 ) = -T*WORK( q ) /
- + WORK( p )
+ $ WORK( p )
CALL DROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1, FASTR )
+ $ A( 1, q ), 1, FASTR )
IF( RSVEC )CALL DROTM( MVL,
- + V( 1, p ), 1,
- + V( 1, q ), 1,
- + FASTR )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1,
+ $ FASTR )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, DABS( T ) )
ELSE
*
@@ -1156,14 +1156,14 @@
THSIGN = -DSIGN( ONE, AAPQ )
IF( AAQQ.GT.AAPP0 )THSIGN = -THSIGN
T = ONE / ( THETA+THSIGN*
- + DSQRT( ONE+THETA*THETA ) )
+ $ DSQRT( ONE+THETA*THETA ) )
CS = DSQRT( ONE / ( ONE+T*T ) )
SN = T*CS
MXSINJ = DMAX1( MXSINJ, DABS( SN ) )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
*
APOAQ = WORK( p ) / WORK( q )
AQOAP = WORK( q ) / WORK( p )
@@ -1175,26 +1175,26 @@
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q )*CS
CALL DROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1,
- + FASTR )
+ $ A( 1, q ), 1,
+ $ FASTR )
IF( RSVEC )CALL DROTM( MVL,
- + V( 1, p ), 1, V( 1, q ),
- + 1, FASTR )
+ $ V( 1, p ), 1, V( 1, q ),
+ $ 1, FASTR )
ELSE
CALL DAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL DAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
IF( RSVEC ) THEN
CALL DAXPY( MVL, -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL DAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q ) / CS
@@ -1202,61 +1202,61 @@
ELSE
IF( WORK( q ).GE.ONE ) THEN
CALL DAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL DAXPY( M, -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
IF( RSVEC ) THEN
CALL DAXPY( MVL, T*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
CALL DAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
WORK( p ) = WORK( p ) / CS
WORK( q ) = WORK( q )*CS
ELSE
IF( WORK( p ).GE.WORK( q ) )
- + THEN
+ $ THEN
CALL DAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL DAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q ) / CS
IF( RSVEC ) THEN
CALL DAXPY( MVL,
- + -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -T*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL DAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
ELSE
CALL DAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL DAXPY( M,
- + -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
WORK( p ) = WORK( p ) / CS
WORK( q ) = WORK( q )*CS
IF( RSVEC ) THEN
CALL DAXPY( MVL,
- + T*APOAQ, V( 1, p ),
- + 1, V( 1, q ), 1 )
+ $ T*APOAQ, V( 1, p ),
+ $ 1, V( 1, q ), 1 )
CALL DAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
END IF
END IF
@@ -1266,39 +1266,39 @@
ELSE
IF( AAPP.GT.AAQQ ) THEN
CALL DCOPY( M, A( 1, p ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL DLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, WORK( N+1 ), LDA,
- + IERR )
+ $ M, 1, WORK( N+1 ), LDA,
+ $ IERR )
CALL DLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*WORK( p ) / WORK( q )
CALL DAXPY( M, TEMP1, WORK( N+1 ),
- + 1, A( 1, q ), 1 )
+ $ 1, A( 1, q ), 1 )
CALL DLASCL( 'G', 0, 0, ONE, AAQQ,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, SFMIN )
ELSE
CALL DCOPY( M, A( 1, q ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL DLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, WORK( N+1 ), LDA,
- + IERR )
+ $ M, 1, WORK( N+1 ), LDA,
+ $ IERR )
CALL DLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*WORK( q ) / WORK( p )
CALL DAXPY( M, TEMP1, WORK( N+1 ),
- + 1, A( 1, p ), 1 )
+ $ 1, A( 1, p ), 1 )
CALL DLASCL( 'G', 0, 0, ONE, AAPP,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
SVA( p ) = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, SFMIN )
END IF
END IF
@@ -1307,29 +1307,29 @@
* In the case of cancellation in updating SVA(q)
* .. recompute SVA(q)
IF( ( SVA( q ) / AAQQ )**2.LE.ROOTEPS )
- + THEN
+ $ THEN
IF( ( AAQQ.LT.ROOTBIG ) .AND.
- + ( AAQQ.GT.ROOTSFMIN ) ) THEN
+ $ ( AAQQ.GT.ROOTSFMIN ) ) THEN
SVA( q ) = DNRM2( M, A( 1, q ), 1 )*
- + WORK( q )
+ $ WORK( q )
ELSE
T = ZERO
AAQQ = ONE
CALL DLASSQ( M, A( 1, q ), 1, T,
- + AAQQ )
+ $ AAQQ )
SVA( q ) = T*DSQRT( AAQQ )*WORK( q )
END IF
END IF
IF( ( AAPP / AAPP0 )**2.LE.ROOTEPS ) THEN
IF( ( AAPP.LT.ROOTBIG ) .AND.
- + ( AAPP.GT.ROOTSFMIN ) ) THEN
+ $ ( AAPP.GT.ROOTSFMIN ) ) THEN
AAPP = DNRM2( M, A( 1, p ), 1 )*
- + WORK( p )
+ $ WORK( p )
ELSE
T = ZERO
AAPP = ONE
CALL DLASSQ( M, A( 1, p ), 1, T,
- + AAPP )
+ $ AAPP )
AAPP = T*DSQRT( AAPP )*WORK( p )
END IF
SVA( p ) = AAPP
@@ -1348,13 +1348,13 @@
END IF
*
IF( ( i.LE.SWBAND ) .AND. ( IJBLSK.GE.BLSKIP ) )
- + THEN
+ $ THEN
SVA( p ) = AAPP
NOTROT = 0
GO TO 2011
END IF
IF( ( i.LE.SWBAND ) .AND.
- + ( PSKIPPED.GT.ROWSKIP ) ) THEN
+ $ ( PSKIPPED.GT.ROWSKIP ) ) THEN
AAPP = -AAPP
NOTROT = 0
GO TO 2203
@@ -1369,7 +1369,7 @@
ELSE
*
IF( AAPP.EQ.ZERO )NOTROT = NOTROT +
- + MIN0( jgl+KBL-1, N ) - jgl + 1
+ $ MIN0( jgl+KBL-1, N ) - jgl + 1
IF( AAPP.LT.ZERO )NOTROT = 0
*
END IF
@@ -1389,7 +1389,7 @@
*
* .. update SVA(N)
IF( ( SVA( N ).LT.ROOTBIG ) .AND. ( SVA( N ).GT.ROOTSFMIN ) )
- + THEN
+ $ THEN
SVA( N ) = DNRM2( M, A( 1, N ), 1 )*WORK( N )
ELSE
T = ZERO
@@ -1401,10 +1401,10 @@
* Additional steering devices
*
IF( ( i.LT.SWBAND ) .AND. ( ( MXAAPQ.LE.ROOTTOL ) .OR.
- + ( ISWROT.LE.N ) ) )SWBAND = i
+ $ ( ISWROT.LE.N ) ) )SWBAND = i
*
IF( ( i.GT.SWBAND+1 ) .AND. ( MXAAPQ.LT.DSQRT( DBLE( N ) )*
- + TOL ) .AND. ( DBLE( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
+ $ TOL ) .AND. ( DBLE( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
GO TO 1994
END IF
*
@@ -1477,8 +1477,8 @@
*
* Undo scaling, if necessary (and possible).
IF( ( ( SKL.GT.ONE ) .AND. ( SVA( 1 ).LT.( BIG /
- + SKL) ) ) .OR. ( ( SKL.LT.ONE ) .AND. ( SVA( N2 ).GT.
- + ( SFMIN / SKL) ) ) ) THEN
+ $ SKL) ) ) .OR. ( ( SKL.LT.ONE ) .AND. ( SVA( N2 ).GT.
+ $ ( SFMIN / SKL) ) ) ) THEN
DO 2400 p = 1, N
SVA( p ) = SKL*SVA( p )
2400 CONTINUE
diff --git a/SRC/dgsvj0.f b/SRC/dgsvj0.f
index c26f929f..6644fbcb 100644
--- a/SRC/dgsvj0.f
+++ b/SRC/dgsvj0.f
@@ -1,5 +1,5 @@
SUBROUTINE DGSVJ0( JOBV, M, N, A, LDA, D, SVA, MV, V, LDV, EPS,
- + SFMIN, TOL, NSWEEP, WORK, LWORK, INFO )
+ $ SFMIN, TOL, NSWEEP, WORK, LWORK, INFO )
*
* -- LAPACK routine (version 3.3.1) --
*
@@ -24,7 +24,7 @@
* ..
* .. Array Arguments ..
DOUBLE PRECISION A( LDA, * ), SVA( N ), D( N ), V( LDV, * ),
- + WORK( LWORK )
+ $ WORK( LWORK )
* ..
*
* Purpose
@@ -145,16 +145,16 @@
* .. Local Parameters ..
DOUBLE PRECISION ZERO, HALF, ONE, TWO
PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0, ONE = 1.0D0,
- + TWO = 2.0D0 )
+ $ TWO = 2.0D0 )
* ..
* .. Local Scalars ..
DOUBLE PRECISION AAPP, AAPP0, AAPQ, AAQQ, APOAQ, AQOAP, BIG,
- + BIGTHETA, CS, MXAAPQ, MXSINJ, ROOTBIG, ROOTEPS,
- + ROOTSFMIN, ROOTTOL, SMALL, SN, T, TEMP1, THETA,
- + THSIGN
+ $ BIGTHETA, CS, MXAAPQ, MXSINJ, ROOTBIG, ROOTEPS,
+ $ ROOTSFMIN, ROOTTOL, SMALL, SN, T, TEMP1, THETA,
+ $ THSIGN
INTEGER BLSKIP, EMPTSW, i, ibr, IERR, igl, IJBLSK, ir1,
- + ISWROT, jbc, jgl, KBL, LKAHEAD, MVL, NBL,
- + NOTROT, p, PSKIPPED, q, ROWSKIP, SWBAND
+ $ ISWROT, jbc, jgl, KBL, LKAHEAD, MVL, NBL,
+ $ NOTROT, p, PSKIPPED, q, ROWSKIP, SWBAND
LOGICAL APPLV, ROTOK, RSVEC
* ..
* .. Local Arrays ..
@@ -189,7 +189,7 @@
ELSE IF( ( RSVEC.OR.APPLV ) .AND. ( MV.LT.0 ) ) THEN
INFO = -8
ELSE IF( ( RSVEC.AND.( LDV.LT.N ) ).OR.
- & ( APPLV.AND.( LDV.LT.MV ) ) ) THEN
+ $ ( APPLV.AND.( LDV.LT.MV ) ) ) THEN
INFO = -10
ELSE IF( TOL.LE.EPS ) THEN
INFO = -13
@@ -282,7 +282,7 @@
IF( p.NE.q ) THEN
CALL DSWAP( M, A( 1, p ), 1, A( 1, q ), 1 )
IF( RSVEC )CALL DSWAP( MVL, V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, q ), 1 )
TEMP1 = SVA( p )
SVA( p ) = SVA( q )
SVA( q ) = TEMP1
@@ -306,7 +306,7 @@
* below should read "AAPP = DNRM2( M, A(1,p), 1 ) * D(p)".
*
IF( ( SVA( p ).LT.ROOTBIG ) .AND.
- + ( SVA( p ).GT.ROOTSFMIN ) ) THEN
+ $ ( SVA( p ).GT.ROOTSFMIN ) ) THEN
SVA( p ) = DNRM2( M, A( 1, p ), 1 )*D( p )
ELSE
TEMP1 = ZERO
@@ -335,27 +335,27 @@
ROTOK = ( SMALL*AAPP ).LE.AAQQ
IF( AAPP.LT.( BIG / AAQQ ) ) THEN
AAPQ = ( DDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL DCOPY( M, A( 1, p ), 1, WORK, 1 )
CALL DLASCL( 'G', 0, 0, AAPP, D( p ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = DDOT( M, WORK, 1, A( 1, q ),
- + 1 )*D( q ) / AAQQ
+ $ 1 )*D( q ) / AAQQ
END IF
ELSE
ROTOK = AAPP.LE.( AAQQ / SMALL )
IF( AAPP.GT.( SMALL / AAQQ ) ) THEN
AAPQ = ( DDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL DCOPY( M, A( 1, q ), 1, WORK, 1 )
CALL DLASCL( 'G', 0, 0, AAQQ, D( q ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = DDOT( M, WORK, 1, A( 1, p ),
- + 1 )*D( p ) / AAPP
+ $ 1 )*D( p ) / AAPP
END IF
END IF
*
@@ -386,15 +386,15 @@
FASTR( 3 ) = T*D( p ) / D( q )
FASTR( 4 ) = -T*D( q ) / D( p )
CALL DROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1, FASTR )
+ $ A( 1, q ), 1, FASTR )
IF( RSVEC )CALL DROTM( MVL,
- + V( 1, p ), 1,
- + V( 1, q ), 1,
- + FASTR )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1,
+ $ FASTR )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, DABS( T ) )
*
ELSE
@@ -403,15 +403,15 @@
*
THSIGN = -DSIGN( ONE, AAPQ )
T = ONE / ( THETA+THSIGN*
- + DSQRT( ONE+THETA*THETA ) )
+ $ DSQRT( ONE+THETA*THETA ) )
CS = DSQRT( ONE / ( ONE+T*T ) )
SN = T*CS
*
MXSINJ = DMAX1( MXSINJ, DABS( SN ) )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
*
APOAQ = D( p ) / D( q )
AQOAP = D( q ) / D( p )
@@ -422,87 +422,87 @@
D( p ) = D( p )*CS
D( q ) = D( q )*CS
CALL DROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1,
- + FASTR )
+ $ A( 1, q ), 1,
+ $ FASTR )
IF( RSVEC )CALL DROTM( MVL,
- + V( 1, p ), 1, V( 1, q ),
- + 1, FASTR )
+ $ V( 1, p ), 1, V( 1, q ),
+ $ 1, FASTR )
ELSE
CALL DAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL DAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
IF( RSVEC ) THEN
CALL DAXPY( MVL, -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL DAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
END IF
ELSE
IF( D( q ).GE.ONE ) THEN
CALL DAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL DAXPY( M, -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
IF( RSVEC ) THEN
CALL DAXPY( MVL, T*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
CALL DAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
ELSE
IF( D( p ).GE.D( q ) ) THEN
CALL DAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL DAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
IF( RSVEC ) THEN
CALL DAXPY( MVL,
- + -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -T*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL DAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
ELSE
CALL DAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL DAXPY( M,
- + -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
IF( RSVEC ) THEN
CALL DAXPY( MVL,
- + T*APOAQ, V( 1, p ),
- + 1, V( 1, q ), 1 )
+ $ T*APOAQ, V( 1, p ),
+ $ 1, V( 1, q ), 1 )
CALL DAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
END IF
END IF
@@ -513,16 +513,16 @@
* .. have to use modified Gram-Schmidt like transformation
CALL DCOPY( M, A( 1, p ), 1, WORK, 1 )
CALL DLASCL( 'G', 0, 0, AAPP, ONE, M,
- + 1, WORK, LDA, IERR )
+ $ 1, WORK, LDA, IERR )
CALL DLASCL( 'G', 0, 0, AAQQ, ONE, M,
- + 1, A( 1, q ), LDA, IERR )
+ $ 1, A( 1, q ), LDA, IERR )
TEMP1 = -AAPQ*D( p ) / D( q )
CALL DAXPY( M, TEMP1, WORK, 1,
- + A( 1, q ), 1 )
+ $ A( 1, q ), 1 )
CALL DLASCL( 'G', 0, 0, ONE, AAQQ, M,
- + 1, A( 1, q ), LDA, IERR )
+ $ 1, A( 1, q ), LDA, IERR )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, SFMIN )
END IF
* END IF ROTOK THEN ... ELSE
@@ -530,29 +530,29 @@
* In the case of cancellation in updating SVA(q), SVA(p)
* recompute SVA(q), SVA(p).
IF( ( SVA( q ) / AAQQ )**2.LE.ROOTEPS )
- + THEN
+ $ THEN
IF( ( AAQQ.LT.ROOTBIG ) .AND.
- + ( AAQQ.GT.ROOTSFMIN ) ) THEN
+ $ ( AAQQ.GT.ROOTSFMIN ) ) THEN
SVA( q ) = DNRM2( M, A( 1, q ), 1 )*
- + D( q )
+ $ D( q )
ELSE
T = ZERO
AAQQ = ONE
CALL DLASSQ( M, A( 1, q ), 1, T,
- + AAQQ )
+ $ AAQQ )
SVA( q ) = T*DSQRT( AAQQ )*D( q )
END IF
END IF
IF( ( AAPP / AAPP0 ).LE.ROOTEPS ) THEN
IF( ( AAPP.LT.ROOTBIG ) .AND.
- + ( AAPP.GT.ROOTSFMIN ) ) THEN
+ $ ( AAPP.GT.ROOTSFMIN ) ) THEN
AAPP = DNRM2( M, A( 1, p ), 1 )*
- + D( p )
+ $ D( p )
ELSE
T = ZERO
AAPP = ONE
CALL DLASSQ( M, A( 1, p ), 1, T,
- + AAPP )
+ $ AAPP )
AAPP = T*DSQRT( AAPP )*D( p )
END IF
SVA( p ) = AAPP
@@ -570,7 +570,7 @@
END IF
*
IF( ( i.LE.SWBAND ) .AND.
- + ( PSKIPPED.GT.ROWSKIP ) ) THEN
+ $ ( PSKIPPED.GT.ROWSKIP ) ) THEN
IF( ir1.EQ.0 )AAPP = -AAPP
NOTROT = 0
GO TO 2103
@@ -587,7 +587,7 @@
ELSE
SVA( p ) = AAPP
IF( ( ir1.EQ.0 ) .AND. ( AAPP.EQ.ZERO ) )
- + NOTROT = NOTROT + MIN0( igl+KBL-1, N ) - p
+ $ NOTROT = NOTROT + MIN0( igl+KBL-1, N ) - p
END IF
*
2001 CONTINUE
@@ -635,14 +635,14 @@
END IF
IF( AAPP.LT.( BIG / AAQQ ) ) THEN
AAPQ = ( DDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL DCOPY( M, A( 1, p ), 1, WORK, 1 )
CALL DLASCL( 'G', 0, 0, AAPP, D( p ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = DDOT( M, WORK, 1, A( 1, q ),
- + 1 )*D( q ) / AAQQ
+ $ 1 )*D( q ) / AAQQ
END IF
ELSE
IF( AAPP.GE.AAQQ ) THEN
@@ -652,14 +652,14 @@
END IF
IF( AAPP.GT.( SMALL / AAQQ ) ) THEN
AAPQ = ( DDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL DCOPY( M, A( 1, q ), 1, WORK, 1 )
CALL DLASCL( 'G', 0, 0, AAQQ, D( q ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = DDOT( M, WORK, 1, A( 1, p ),
- + 1 )*D( p ) / AAPP
+ $ 1 )*D( p ) / AAPP
END IF
END IF
*
@@ -685,15 +685,15 @@
FASTR( 3 ) = T*D( p ) / D( q )
FASTR( 4 ) = -T*D( q ) / D( p )
CALL DROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1, FASTR )
+ $ A( 1, q ), 1, FASTR )
IF( RSVEC )CALL DROTM( MVL,
- + V( 1, p ), 1,
- + V( 1, q ), 1,
- + FASTR )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1,
+ $ FASTR )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, DABS( T ) )
ELSE
*
@@ -702,14 +702,14 @@
THSIGN = -DSIGN( ONE, AAPQ )
IF( AAQQ.GT.AAPP0 )THSIGN = -THSIGN
T = ONE / ( THETA+THSIGN*
- + DSQRT( ONE+THETA*THETA ) )
+ $ DSQRT( ONE+THETA*THETA ) )
CS = DSQRT( ONE / ( ONE+T*T ) )
SN = T*CS
MXSINJ = DMAX1( MXSINJ, DABS( SN ) )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
*
APOAQ = D( p ) / D( q )
AQOAP = D( q ) / D( p )
@@ -721,26 +721,26 @@
D( p ) = D( p )*CS
D( q ) = D( q )*CS
CALL DROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1,
- + FASTR )
+ $ A( 1, q ), 1,
+ $ FASTR )
IF( RSVEC )CALL DROTM( MVL,
- + V( 1, p ), 1, V( 1, q ),
- + 1, FASTR )
+ $ V( 1, p ), 1, V( 1, q ),
+ $ 1, FASTR )
ELSE
CALL DAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL DAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
IF( RSVEC ) THEN
CALL DAXPY( MVL, -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL DAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
@@ -748,60 +748,60 @@
ELSE
IF( D( q ).GE.ONE ) THEN
CALL DAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL DAXPY( M, -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
IF( RSVEC ) THEN
CALL DAXPY( MVL, T*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
CALL DAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
ELSE
IF( D( p ).GE.D( q ) ) THEN
CALL DAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL DAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
IF( RSVEC ) THEN
CALL DAXPY( MVL,
- + -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -T*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL DAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
ELSE
CALL DAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL DAXPY( M,
- + -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
IF( RSVEC ) THEN
CALL DAXPY( MVL,
- + T*APOAQ, V( 1, p ),
- + 1, V( 1, q ), 1 )
+ $ T*APOAQ, V( 1, p ),
+ $ 1, V( 1, q ), 1 )
CALL DAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
END IF
END IF
@@ -811,37 +811,37 @@
ELSE
IF( AAPP.GT.AAQQ ) THEN
CALL DCOPY( M, A( 1, p ), 1, WORK,
- + 1 )
+ $ 1 )
CALL DLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
CALL DLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*D( p ) / D( q )
CALL DAXPY( M, TEMP1, WORK, 1,
- + A( 1, q ), 1 )
+ $ A( 1, q ), 1 )
CALL DLASCL( 'G', 0, 0, ONE, AAQQ,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, SFMIN )
ELSE
CALL DCOPY( M, A( 1, q ), 1, WORK,
- + 1 )
+ $ 1 )
CALL DLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
CALL DLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*D( q ) / D( p )
CALL DAXPY( M, TEMP1, WORK, 1,
- + A( 1, p ), 1 )
+ $ A( 1, p ), 1 )
CALL DLASCL( 'G', 0, 0, ONE, AAPP,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
SVA( p ) = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, SFMIN )
END IF
END IF
@@ -850,29 +850,29 @@
* In the case of cancellation in updating SVA(q)
* .. recompute SVA(q)
IF( ( SVA( q ) / AAQQ )**2.LE.ROOTEPS )
- + THEN
+ $ THEN
IF( ( AAQQ.LT.ROOTBIG ) .AND.
- + ( AAQQ.GT.ROOTSFMIN ) ) THEN
+ $ ( AAQQ.GT.ROOTSFMIN ) ) THEN
SVA( q ) = DNRM2( M, A( 1, q ), 1 )*
- + D( q )
+ $ D( q )
ELSE
T = ZERO
AAQQ = ONE
CALL DLASSQ( M, A( 1, q ), 1, T,
- + AAQQ )
+ $ AAQQ )
SVA( q ) = T*DSQRT( AAQQ )*D( q )
END IF
END IF
IF( ( AAPP / AAPP0 )**2.LE.ROOTEPS ) THEN
IF( ( AAPP.LT.ROOTBIG ) .AND.
- + ( AAPP.GT.ROOTSFMIN ) ) THEN
+ $ ( AAPP.GT.ROOTSFMIN ) ) THEN
AAPP = DNRM2( M, A( 1, p ), 1 )*
- + D( p )
+ $ D( p )
ELSE
T = ZERO
AAPP = ONE
CALL DLASSQ( M, A( 1, p ), 1, T,
- + AAPP )
+ $ AAPP )
AAPP = T*DSQRT( AAPP )*D( p )
END IF
SVA( p ) = AAPP
@@ -890,13 +890,13 @@
END IF
*
IF( ( i.LE.SWBAND ) .AND. ( IJBLSK.GE.BLSKIP ) )
- + THEN
+ $ THEN
SVA( p ) = AAPP
NOTROT = 0
GO TO 2011
END IF
IF( ( i.LE.SWBAND ) .AND.
- + ( PSKIPPED.GT.ROWSKIP ) ) THEN
+ $ ( PSKIPPED.GT.ROWSKIP ) ) THEN
AAPP = -AAPP
NOTROT = 0
GO TO 2203
@@ -910,7 +910,7 @@
*
ELSE
IF( AAPP.EQ.ZERO )NOTROT = NOTROT +
- + MIN0( jgl+KBL-1, N ) - jgl + 1
+ $ MIN0( jgl+KBL-1, N ) - jgl + 1
IF( AAPP.LT.ZERO )NOTROT = 0
END IF
@@ -929,7 +929,7 @@
*
* .. update SVA(N)
IF( ( SVA( N ).LT.ROOTBIG ) .AND. ( SVA( N ).GT.ROOTSFMIN ) )
- + THEN
+ $ THEN
SVA( N ) = DNRM2( M, A( 1, N ), 1 )*D( N )
ELSE
T = ZERO
@@ -941,10 +941,10 @@
* Additional steering devices
*
IF( ( i.LT.SWBAND ) .AND. ( ( MXAAPQ.LE.ROOTTOL ) .OR.
- + ( ISWROT.LE.N ) ) )SWBAND = i
+ $ ( ISWROT.LE.N ) ) )SWBAND = i
*
IF( ( i.GT.SWBAND+1 ) .AND. ( MXAAPQ.LT.DBLE( N )*TOL ) .AND.
- + ( DBLE( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
+ $ ( DBLE( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
GO TO 1994
END IF
*
diff --git a/SRC/dgsvj1.f b/SRC/dgsvj1.f
index 89704e73..e9a3494e 100644
--- a/SRC/dgsvj1.f
+++ b/SRC/dgsvj1.f
@@ -1,5 +1,5 @@
SUBROUTINE DGSVJ1( JOBV, M, N, N1, A, LDA, D, SVA, MV, V, LDV,
- + EPS, SFMIN, TOL, NSWEEP, WORK, LWORK, INFO )
+ $ EPS, SFMIN, TOL, NSWEEP, WORK, LWORK, INFO )
*
* -- LAPACK routine (version 3.3.1) --
*
@@ -24,7 +24,7 @@
* ..
* .. Array Arguments ..
DOUBLE PRECISION A( LDA, * ), D( N ), SVA( N ), V( LDV, * ),
- + WORK( LWORK )
+ $ WORK( LWORK )
* ..
*
* Purpose
@@ -44,12 +44,12 @@
* block-entries (tiles) of the (1,2) off-diagonal block are marked by the
* [x]'s in the following scheme:
*
-* | * * * [x] [x] [x]|
-* | * * * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks.
-* | * * * [x] [x] [x]| Row-cyclic pivoting inside each [x] block.
-* |[x] [x] [x] * * * |
-* |[x] [x] [x] * * * |
-* |[x] [x] [x] * * * |
+* | * * * [x] [x] [x]|
+* | * * * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks.
+* | * * * [x] [x] [x]| Row-cyclic pivoting inside each [x] block.
+* |[x] [x] [x] * * * |
+* |[x] [x] [x] * * * |
+* |[x] [x] [x] * * * |
*
* In terms of the columns of A, the first N1 columns are rotated 'against'
* the remaining N-N1 columns, trying to increase the angle between the
@@ -162,16 +162,16 @@
* .. Local Parameters ..
DOUBLE PRECISION ZERO, HALF, ONE, TWO
PARAMETER ( ZERO = 0.0D0, HALF = 0.5D0, ONE = 1.0D0,
- + TWO = 2.0D0 )
+ $ TWO = 2.0D0 )
* ..
* .. Local Scalars ..
DOUBLE PRECISION AAPP, AAPP0, AAPQ, AAQQ, APOAQ, AQOAP, BIG,
- + BIGTHETA, CS, LARGE, MXAAPQ, MXSINJ, ROOTBIG,
- + ROOTEPS, ROOTSFMIN, ROOTTOL, SMALL, SN, T,
- + TEMP1, THETA, THSIGN
+ $ BIGTHETA, CS, LARGE, MXAAPQ, MXSINJ, ROOTBIG,
+ $ ROOTEPS, ROOTSFMIN, ROOTTOL, SMALL, SN, T,
+ $ TEMP1, THETA, THSIGN
INTEGER BLSKIP, EMPTSW, i, ibr, igl, IERR, IJBLSK,
- + ISWROT, jbc, jgl, KBL, MVL, NOTROT, nblc, nblr,
- + p, PSKIPPED, q, ROWSKIP, SWBAND
+ $ ISWROT, jbc, jgl, KBL, MVL, NOTROT, nblc, nblr,
+ $ p, PSKIPPED, q, ROWSKIP, SWBAND
LOGICAL APPLV, ROTOK, RSVEC
* ..
* .. Local Arrays ..
@@ -208,7 +208,7 @@
ELSE IF( ( RSVEC.OR.APPLV ) .AND. ( MV.LT.0 ) ) THEN
INFO = -9
ELSE IF( ( RSVEC.AND.( LDV.LT.N ) ).OR.
- & ( APPLV.AND.( LDV.LT.MV ) ) ) THEN
+ $ ( APPLV.AND.( LDV.LT.MV ) ) ) THEN
INFO = -11
ELSE IF( TOL.LE.EPS ) THEN
INFO = -14
@@ -271,12 +271,12 @@
* Jacobi SVD algorithm SGESVJ.
*
*
-* | * * * [x] [x] [x]|
-* | * * * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks.
-* | * * * [x] [x] [x]| Row-cyclic pivoting inside each [x] block.
-* |[x] [x] [x] * * * |
-* |[x] [x] [x] * * * |
-* |[x] [x] [x] * * * |
+* | * * * [x] [x] [x]|
+* | * * * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks.
+* | * * * [x] [x] [x]| Row-cyclic pivoting inside each [x] block.
+* |[x] [x] [x] * * * |
+* |[x] [x] [x] * * * |
+* |[x] [x] [x] * * * |
*
*
DO 1993 i = 1, NSWEEP
@@ -333,14 +333,14 @@
END IF
IF( AAPP.LT.( BIG / AAQQ ) ) THEN
AAPQ = ( DDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL DCOPY( M, A( 1, p ), 1, WORK, 1 )
CALL DLASCL( 'G', 0, 0, AAPP, D( p ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = DDOT( M, WORK, 1, A( 1, q ),
- + 1 )*D( q ) / AAQQ
+ $ 1 )*D( q ) / AAQQ
END IF
ELSE
IF( AAPP.GE.AAQQ ) THEN
@@ -350,14 +350,14 @@
END IF
IF( AAPP.GT.( SMALL / AAQQ ) ) THEN
AAPQ = ( DDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL DCOPY( M, A( 1, q ), 1, WORK, 1 )
CALL DLASCL( 'G', 0, 0, AAQQ, D( q ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = DDOT( M, WORK, 1, A( 1, p ),
- + 1 )*D( p ) / AAPP
+ $ 1 )*D( p ) / AAPP
END IF
END IF
@@ -383,15 +383,15 @@
FASTR( 3 ) = T*D( p ) / D( q )
FASTR( 4 ) = -T*D( q ) / D( p )
CALL DROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1, FASTR )
+ $ A( 1, q ), 1, FASTR )
IF( RSVEC )CALL DROTM( MVL,
- + V( 1, p ), 1,
- + V( 1, q ), 1,
- + FASTR )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1,
+ $ FASTR )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, DABS( T ) )
ELSE
*
@@ -400,14 +400,14 @@
THSIGN = -DSIGN( ONE, AAPQ )
IF( AAQQ.GT.AAPP0 )THSIGN = -THSIGN
T = ONE / ( THETA+THSIGN*
- + DSQRT( ONE+THETA*THETA ) )
+ $ DSQRT( ONE+THETA*THETA ) )
CS = DSQRT( ONE / ( ONE+T*T ) )
SN = T*CS
MXSINJ = DMAX1( MXSINJ, DABS( SN ) )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
APOAQ = D( p ) / D( q )
AQOAP = D( q ) / D( p )
@@ -419,26 +419,26 @@
D( p ) = D( p )*CS
D( q ) = D( q )*CS
CALL DROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1,
- + FASTR )
+ $ A( 1, q ), 1,
+ $ FASTR )
IF( RSVEC )CALL DROTM( MVL,
- + V( 1, p ), 1, V( 1, q ),
- + 1, FASTR )
+ $ V( 1, p ), 1, V( 1, q ),
+ $ 1, FASTR )
ELSE
CALL DAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL DAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
IF( RSVEC ) THEN
CALL DAXPY( MVL, -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL DAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
@@ -446,60 +446,60 @@
ELSE
IF( D( q ).GE.ONE ) THEN
CALL DAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL DAXPY( M, -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
IF( RSVEC ) THEN
CALL DAXPY( MVL, T*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
CALL DAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
ELSE
IF( D( p ).GE.D( q ) ) THEN
CALL DAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL DAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
IF( RSVEC ) THEN
CALL DAXPY( MVL,
- + -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -T*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL DAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
ELSE
CALL DAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL DAXPY( M,
- + -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
IF( RSVEC ) THEN
CALL DAXPY( MVL,
- + T*APOAQ, V( 1, p ),
- + 1, V( 1, q ), 1 )
+ $ T*APOAQ, V( 1, p ),
+ $ 1, V( 1, q ), 1 )
CALL DAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
END IF
END IF
@@ -509,37 +509,37 @@
ELSE
IF( AAPP.GT.AAQQ ) THEN
CALL DCOPY( M, A( 1, p ), 1, WORK,
- + 1 )
+ $ 1 )
CALL DLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
CALL DLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*D( p ) / D( q )
CALL DAXPY( M, TEMP1, WORK, 1,
- + A( 1, q ), 1 )
+ $ A( 1, q ), 1 )
CALL DLASCL( 'G', 0, 0, ONE, AAQQ,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
SVA( q ) = AAQQ*DSQRT( DMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, SFMIN )
ELSE
CALL DCOPY( M, A( 1, q ), 1, WORK,
- + 1 )
+ $ 1 )
CALL DLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
CALL DLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*D( q ) / D( p )
CALL DAXPY( M, TEMP1, WORK, 1,
- + A( 1, p ), 1 )
+ $ A( 1, p ), 1 )
CALL DLASCL( 'G', 0, 0, ONE, AAPP,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
SVA( p ) = AAPP*DSQRT( DMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = DMAX1( MXSINJ, SFMIN )
END IF
END IF
@@ -548,29 +548,29 @@
* In the case of cancellation in updating SVA(q)
* .. recompute SVA(q)
IF( ( SVA( q ) / AAQQ )**2.LE.ROOTEPS )
- + THEN
+ $ THEN
IF( ( AAQQ.LT.ROOTBIG ) .AND.
- + ( AAQQ.GT.ROOTSFMIN ) ) THEN
+ $ ( AAQQ.GT.ROOTSFMIN ) ) THEN
SVA( q ) = DNRM2( M, A( 1, q ), 1 )*
- + D( q )
+ $ D( q )
ELSE
T = ZERO
AAQQ = ONE
CALL DLASSQ( M, A( 1, q ), 1, T,
- + AAQQ )
+ $ AAQQ )
SVA( q ) = T*DSQRT( AAQQ )*D( q )
END IF
END IF
IF( ( AAPP / AAPP0 )**2.LE.ROOTEPS ) THEN
IF( ( AAPP.LT.ROOTBIG ) .AND.
- + ( AAPP.GT.ROOTSFMIN ) ) THEN
+ $ ( AAPP.GT.ROOTSFMIN ) ) THEN
AAPP = DNRM2( M, A( 1, p ), 1 )*
- + D( p )
+ $ D( p )
ELSE
T = ZERO
AAPP = ONE
CALL DLASSQ( M, A( 1, p ), 1, T,
- + AAPP )
+ $ AAPP )
AAPP = T*DSQRT( AAPP )*D( p )
END IF
SVA( p ) = AAPP
@@ -590,13 +590,13 @@
* IF ( NOTROT .GE. EMPTSW ) GO TO 2011
IF( ( i.LE.SWBAND ) .AND. ( IJBLSK.GE.BLSKIP ) )
- + THEN
+ $ THEN
SVA( p ) = AAPP
NOTROT = 0
GO TO 2011
END IF
IF( ( i.LE.SWBAND ) .AND.
- + ( PSKIPPED.GT.ROWSKIP ) ) THEN
+ $ ( PSKIPPED.GT.ROWSKIP ) ) THEN
AAPP = -AAPP
NOTROT = 0
GO TO 2203
@@ -611,7 +611,7 @@
*
ELSE
IF( AAPP.EQ.ZERO )NOTROT = NOTROT +
- + MIN0( jgl+KBL-1, N ) - jgl + 1
+ $ MIN0( jgl+KBL-1, N ) - jgl + 1
IF( AAPP.LT.ZERO )NOTROT = 0
*** IF ( NOTROT .GE. EMPTSW ) GO TO 2011
END IF
@@ -631,7 +631,7 @@
*
* .. update SVA(N)
IF( ( SVA( N ).LT.ROOTBIG ) .AND. ( SVA( N ).GT.ROOTSFMIN ) )
- + THEN
+ $ THEN
SVA( N ) = DNRM2( M, A( 1, N ), 1 )*D( N )
ELSE
T = ZERO
@@ -643,10 +643,10 @@
* Additional steering devices
*
IF( ( i.LT.SWBAND ) .AND. ( ( MXAAPQ.LE.ROOTTOL ) .OR.
- + ( ISWROT.LE.N ) ) )SWBAND = i
+ $ ( ISWROT.LE.N ) ) )SWBAND = i
IF( ( i.GT.SWBAND+1 ) .AND. ( MXAAPQ.LT.DBLE( N )*TOL ) .AND.
- + ( DBLE( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
+ $ ( DBLE( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
GO TO 1994
END IF
diff --git a/SRC/dlag2s.f b/SRC/dlag2s.f
index af47befb..1c0ae741 100644
--- a/SRC/dlag2s.f
+++ b/SRC/dlag2s.f
@@ -54,7 +54,7 @@
* PRECISION overflow threshold, in this case, the content
* of SA in exit is unspecified.
*
-* =========
+* =====================================================================
*
* .. Local Scalars ..
INTEGER I, J
diff --git a/SRC/dlansf.f b/SRC/dlansf.f
index 903b6eb3..6ba669b7 100644
--- a/SRC/dlansf.f
+++ b/SRC/dlansf.f
@@ -193,19 +193,19 @@
*
NOE = 1
IF( MOD( N, 2 ).EQ.0 )
- + NOE = 0
+ $ NOE = 0
*
* set ifm = 0 when form='T or 't' and 1 otherwise
*
IFM = 1
IF( LSAME( TRANSR, 'T' ) )
- + IFM = 0
+ $ IFM = 0
*
* set ilu = 0 when uplo='U or 'u' and 1 otherwise
*
ILU = 1
IF( LSAME( UPLO, 'U' ) )
- + ILU = 0
+ $ ILU = 0
*
* set lda = (n+1)/2 when ifm = 0
* set lda = n when ifm = 1 and noe = 1
@@ -265,7 +265,7 @@
END IF
END IF
ELSE IF( ( LSAME( NORM, 'I' ) ) .OR. ( LSAME( NORM, 'O' ) ) .OR.
- + ( NORM.EQ.'1' ) ) THEN
+ $ ( NORM.EQ.'1' ) ) THEN
*
* Find normI(A) ( = norm1(A), since A is symmetric).
*
@@ -289,7 +289,7 @@
* -> A(j+k,j+k)
WORK( J+K ) = S + AA
IF( I.EQ.K+K )
- + GO TO 10
+ $ GO TO 10
I = I + 1
AA = ABS( A( I+J*LDA ) )
* -> A(j,j)
@@ -735,7 +735,7 @@
END DO
DO J = 0, K - 2
CALL DLASSQ( K-J-1, A( J+1+( J+K-1 )*LDA ), 1,
- + SCALE, S )
+ $ SCALE, S )
* L at A(0,k-1)
END DO
S = S + S
@@ -817,7 +817,7 @@
END DO
DO J = 0, K - 2
CALL DLASSQ( K-J-1, A( J+1+( J+K )*LDA ), 1, SCALE,
- + S )
+ $ S )
* L at A(0,k)
END DO
S = S + S
diff --git a/SRC/dlarrv.f b/SRC/dlarrv.f
index a7d9a2cc..8adcd24f 100644
--- a/SRC/dlarrv.f
+++ b/SRC/dlarrv.f
@@ -332,7 +332,7 @@
* high relative accuracy is required for the computation of the
* corresponding eigenvectors.
CALL DCOPY( IM, W( WBEGIN ), 1,
- & WORK( WBEGIN ), 1 )
+ $ WORK( WBEGIN ), 1 )
* We store in W the eigenvalue approximations w.r.t. the original
* matrix T.
@@ -431,7 +431,7 @@
Q = INDEXW( WBEGIN-1+OLDLST )
* Offset for the arrays WORK, WGAP and WERR, i.e., the P-OFFSET
* through the Q-OFFSET elements of these arrays are to be used.
-C OFFSET = P-OLDFST
+* OFFSET = P-OLDFST
OFFSET = INDEXW( WBEGIN ) - 1
* perform limited bisection (if necessary) to get approximate
* eigenvalues to the precision needed.
@@ -570,7 +570,7 @@ C OFFSET = P-OLDFST
* Compute RRR of child cluster.
* Note that the new RRR is stored in Z
*
-C DLARRF needs LWORK = 2*N
+* DLARRF needs LWORK = 2*N
CALL DLARRF( IN, D( IBEGIN ), L( IBEGIN ),
$ WORK(INDLD+IBEGIN-1),
$ NEWFST, NEWLST, WORK(WBEGIN),
diff --git a/SRC/dlasq4.f b/SRC/dlasq4.f
index 554062f8..ea42bbc4 100644
--- a/SRC/dlasq4.f
+++ b/SRC/dlasq4.f
@@ -25,6 +25,9 @@
* DLASQ4 computes an approximation TAU to the smallest eigenvalue
* using values of d from the previous transform.
*
+* Arguments
+* =========
+*
* I0 (input) INTEGER
* First index.
*
diff --git a/SRC/dlat2s.f b/SRC/dlat2s.f
index 887b370d..8026a0ae 100644
--- a/SRC/dlat2s.f
+++ b/SRC/dlat2s.f
@@ -56,7 +56,7 @@
* PRECISION overflow threshold, in this case, the content
* of the UPLO part of SA in exit is unspecified.
*
-* =========
+* =====================================================================
*
* .. Local Scalars ..
INTEGER I, J
@@ -76,7 +76,7 @@
DO 20 J = 1, N
DO 10 I = 1, J
IF( ( A( I, J ).LT.-RMAX ) .OR. ( A( I, J ).GT.RMAX ) )
- + THEN
+ $ THEN
INFO = 1
GO TO 50
END IF
@@ -87,7 +87,7 @@
DO 40 J = 1, N
DO 30 I = J, N
IF( ( A( I, J ).LT.-RMAX ) .OR. ( A( I, J ).GT.RMAX ) )
- + THEN
+ $ THEN
INFO = 1
GO TO 50
END IF
diff --git a/SRC/dpftrf.f b/SRC/dpftrf.f
index cd9d183c..dd5e3075 100644
--- a/SRC/dpftrf.f
+++ b/SRC/dpftrf.f
@@ -192,7 +192,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -232,14 +232,14 @@
*
CALL DPOTRF( 'L', N1, A( 0 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRSM( 'R', 'L', 'T', 'N', N2, N1, ONE, A( 0 ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL DSYRK( 'U', 'N', N2, N1, -ONE, A( N1 ), N, ONE,
- + A( N ), N )
+ $ A( N ), N )
CALL DPOTRF( 'U', N2, A( N ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
ELSE
*
@@ -249,14 +249,14 @@
*
CALL DPOTRF( 'L', N1, A( N2 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRSM( 'L', 'L', 'N', 'N', N1, N2, ONE, A( N2 ), N,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL DSYRK( 'U', 'T', N2, N1, -ONE, A( 0 ), N, ONE,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL DPOTRF( 'U', N2, A( N1 ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
END IF
*
@@ -272,14 +272,14 @@
*
CALL DPOTRF( 'U', N1, A( 0 ), N1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRSM( 'L', 'U', 'T', 'N', N1, N2, ONE, A( 0 ), N1,
- + A( N1*N1 ), N1 )
+ $ A( N1*N1 ), N1 )
CALL DSYRK( 'L', 'T', N2, N1, -ONE, A( N1*N1 ), N1, ONE,
- + A( 1 ), N1 )
+ $ A( 1 ), N1 )
CALL DPOTRF( 'L', N2, A( 1 ), N1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
ELSE
*
@@ -289,14 +289,14 @@
*
CALL DPOTRF( 'U', N1, A( N2*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRSM( 'R', 'U', 'N', 'N', N2, N1, ONE, A( N2*N2 ),
- + N2, A( 0 ), N2 )
+ $ N2, A( 0 ), N2 )
CALL DSYRK( 'L', 'N', N2, N1, -ONE, A( 0 ), N2, ONE,
- + A( N1*N2 ), N2 )
+ $ A( N1*N2 ), N2 )
CALL DPOTRF( 'L', N2, A( N1*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
END IF
*
@@ -318,14 +318,14 @@
*
CALL DPOTRF( 'L', K, A( 1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRSM( 'R', 'L', 'T', 'N', K, K, ONE, A( 1 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL DSYRK( 'U', 'N', K, K, -ONE, A( K+1 ), N+1, ONE,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
CALL DPOTRF( 'U', K, A( 0 ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
ELSE
*
@@ -335,14 +335,14 @@
*
CALL DPOTRF( 'L', K, A( K+1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRSM( 'L', 'L', 'N', 'N', K, K, ONE, A( K+1 ),
- + N+1, A( 0 ), N+1 )
+ $ N+1, A( 0 ), N+1 )
CALL DSYRK( 'U', 'T', K, K, -ONE, A( 0 ), N+1, ONE,
- + A( K ), N+1 )
+ $ A( K ), N+1 )
CALL DPOTRF( 'U', K, A( K ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
END IF
*
@@ -358,14 +358,14 @@
*
CALL DPOTRF( 'U', K, A( 0+K ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRSM( 'L', 'U', 'T', 'N', K, K, ONE, A( K ), N1,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL DSYRK( 'L', 'T', K, K, -ONE, A( K*( K+1 ) ), K, ONE,
- + A( 0 ), K )
+ $ A( 0 ), K )
CALL DPOTRF( 'L', K, A( 0 ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
ELSE
*
@@ -375,14 +375,14 @@
*
CALL DPOTRF( 'U', K, A( K*( K+1 ) ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRSM( 'R', 'U', 'N', 'N', K, K, ONE,
- + A( K*( K+1 ) ), K, A( 0 ), K )
+ $ A( K*( K+1 ) ), K, A( 0 ), K )
CALL DSYRK( 'L', 'N', K, K, -ONE, A( 0 ), K, ONE,
- + A( K*K ), K )
+ $ A( K*K ), K )
CALL DPOTRF( 'L', K, A( K*K ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
END IF
*
diff --git a/SRC/dpftri.f b/SRC/dpftri.f
index 01d0050b..cb9be9a8 100644
--- a/SRC/dpftri.f
+++ b/SRC/dpftri.f
@@ -184,13 +184,13 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* Invert the triangular Cholesky factor U or L.
*
CALL DTFTRI( TRANSR, UPLO, 'N', N, A, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -231,9 +231,9 @@
*
CALL DLAUUM( 'L', N1, A( 0 ), N, INFO )
CALL DSYRK( 'L', 'T', N1, N2, ONE, A( N1 ), N, ONE,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL DTRMM( 'L', 'U', 'N', 'N', N2, N1, ONE, A( N ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL DLAUUM( 'U', N2, A( N ), N, INFO )
*
ELSE
@@ -244,9 +244,9 @@
*
CALL DLAUUM( 'L', N1, A( N2 ), N, INFO )
CALL DSYRK( 'L', 'N', N1, N2, ONE, A( 0 ), N, ONE,
- + A( N2 ), N )
+ $ A( N2 ), N )
CALL DTRMM( 'R', 'U', 'T', 'N', N1, N2, ONE, A( N1 ), N,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL DLAUUM( 'U', N2, A( N1 ), N, INFO )
*
END IF
@@ -262,9 +262,9 @@
*
CALL DLAUUM( 'U', N1, A( 0 ), N1, INFO )
CALL DSYRK( 'U', 'N', N1, N2, ONE, A( N1*N1 ), N1, ONE,
- + A( 0 ), N1 )
+ $ A( 0 ), N1 )
CALL DTRMM( 'R', 'L', 'N', 'N', N1, N2, ONE, A( 1 ), N1,
- + A( N1*N1 ), N1 )
+ $ A( N1*N1 ), N1 )
CALL DLAUUM( 'L', N2, A( 1 ), N1, INFO )
*
ELSE
@@ -274,9 +274,9 @@
*
CALL DLAUUM( 'U', N1, A( N2*N2 ), N2, INFO )
CALL DSYRK( 'U', 'T', N1, N2, ONE, A( 0 ), N2, ONE,
- + A( N2*N2 ), N2 )
+ $ A( N2*N2 ), N2 )
CALL DTRMM( 'L', 'L', 'T', 'N', N2, N1, ONE, A( N1*N2 ),
- + N2, A( 0 ), N2 )
+ $ N2, A( 0 ), N2 )
CALL DLAUUM( 'L', N2, A( N1*N2 ), N2, INFO )
*
END IF
@@ -299,9 +299,9 @@
*
CALL DLAUUM( 'L', K, A( 1 ), N+1, INFO )
CALL DSYRK( 'L', 'T', K, K, ONE, A( K+1 ), N+1, ONE,
- + A( 1 ), N+1 )
+ $ A( 1 ), N+1 )
CALL DTRMM( 'L', 'U', 'N', 'N', K, K, ONE, A( 0 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL DLAUUM( 'U', K, A( 0 ), N+1, INFO )
*
ELSE
@@ -312,9 +312,9 @@
*
CALL DLAUUM( 'L', K, A( K+1 ), N+1, INFO )
CALL DSYRK( 'L', 'N', K, K, ONE, A( 0 ), N+1, ONE,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL DTRMM( 'R', 'U', 'T', 'N', K, K, ONE, A( K ), N+1,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
CALL DLAUUM( 'U', K, A( K ), N+1, INFO )
*
END IF
@@ -331,9 +331,9 @@
*
CALL DLAUUM( 'U', K, A( K ), K, INFO )
CALL DSYRK( 'U', 'N', K, K, ONE, A( K*( K+1 ) ), K, ONE,
- + A( K ), K )
+ $ A( K ), K )
CALL DTRMM( 'R', 'L', 'N', 'N', K, K, ONE, A( 0 ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL DLAUUM( 'L', K, A( 0 ), K, INFO )
*
ELSE
@@ -344,9 +344,9 @@
*
CALL DLAUUM( 'U', K, A( K*( K+1 ) ), K, INFO )
CALL DSYRK( 'U', 'T', K, K, ONE, A( 0 ), K, ONE,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL DTRMM( 'L', 'L', 'T', 'N', K, K, ONE, A( K*K ), K,
- + A( 0 ), K )
+ $ A( 0 ), K )
CALL DLAUUM( 'L', K, A( K*K ), K, INFO )
*
END IF
diff --git a/SRC/dpftrs.f b/SRC/dpftrs.f
index 25c89e0f..dd60d905 100644
--- a/SRC/dpftrs.f
+++ b/SRC/dpftrs.f
@@ -186,20 +186,20 @@
* Quick return if possible
*
IF( N.EQ.0 .OR. NRHS.EQ.0 )
- + RETURN
+ $ RETURN
*
* start execution: there are two triangular solves
*
IF( LOWER ) THEN
CALL DTFSM( TRANSR, 'L', UPLO, 'N', 'N', N, NRHS, ONE, A, B,
- + LDB )
+ $ LDB )
CALL DTFSM( TRANSR, 'L', UPLO, 'T', 'N', N, NRHS, ONE, A, B,
- + LDB )
+ $ LDB )
ELSE
CALL DTFSM( TRANSR, 'L', UPLO, 'T', 'N', N, NRHS, ONE, A, B,
- + LDB )
+ $ LDB )
CALL DTFSM( TRANSR, 'L', UPLO, 'N', 'N', N, NRHS, ONE, A, B,
- + LDB )
+ $ LDB )
END IF
*
RETURN
diff --git a/SRC/dsfrk.f b/SRC/dsfrk.f
index 99d89c93..63a42a0a 100644
--- a/SRC/dsfrk.f
+++ b/SRC/dsfrk.f
@@ -1,5 +1,5 @@
SUBROUTINE DSFRK( TRANSR, UPLO, TRANS, N, K, ALPHA, A, LDA, BETA,
- + C )
+ $ C )
*
* -- LAPACK routine (version 3.3.0) --
*
@@ -106,8 +106,7 @@
* NT = N*(N+1)/2. On entry, the symmetric matrix C in RFP
* Format. RFP Format is described by TRANSR, UPLO and N.
*
-* Arguments
-* ==========
+* =====================================================================
*
* ..
* .. Parameters ..
@@ -167,7 +166,7 @@
* done (it is in DSYRK for example) and left in the general case.
*
IF( ( N.EQ.0 ) .OR. ( ( ( ALPHA.EQ.ZERO ) .OR. ( K.EQ.0 ) ) .AND.
- + ( BETA.EQ.ONE ) ) )RETURN
+ $ ( BETA.EQ.ONE ) ) )RETURN
*
IF( ( ALPHA.EQ.ZERO ) .AND. ( BETA.EQ.ZERO ) ) THEN
DO J = 1, ( ( N*( N+1 ) ) / 2 )
@@ -211,22 +210,22 @@
* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'N'
*
CALL DSYRK( 'L', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N )
+ $ BETA, C( 1 ), N )
CALL DSYRK( 'U', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( N+1 ), N )
+ $ BETA, C( N+1 ), N )
CALL DGEMM( 'N', 'T', N2, N1, K, ALPHA, A( N1+1, 1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( N1+1 ), N )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( N1+1 ), N )
*
ELSE
*
* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'T'
*
CALL DSYRK( 'L', 'T', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N )
+ $ BETA, C( 1 ), N )
CALL DSYRK( 'U', 'T', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( N+1 ), N )
+ $ BETA, C( N+1 ), N )
CALL DGEMM( 'T', 'N', N2, N1, K, ALPHA, A( 1, N1+1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( N1+1 ), N )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( N1+1 ), N )
*
END IF
*
@@ -239,22 +238,22 @@
* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'N'
*
CALL DSYRK( 'L', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2+1 ), N )
+ $ BETA, C( N2+1 ), N )
CALL DSYRK( 'U', 'N', N2, K, ALPHA, A( N2, 1 ), LDA,
- + BETA, C( N1+1 ), N )
+ $ BETA, C( N1+1 ), N )
CALL DGEMM( 'N', 'T', N1, N2, K, ALPHA, A( 1, 1 ),
- + LDA, A( N2, 1 ), LDA, BETA, C( 1 ), N )
+ $ LDA, A( N2, 1 ), LDA, BETA, C( 1 ), N )
*
ELSE
*
* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'T'
*
CALL DSYRK( 'L', 'T', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2+1 ), N )
+ $ BETA, C( N2+1 ), N )
CALL DSYRK( 'U', 'T', N2, K, ALPHA, A( 1, N2 ), LDA,
- + BETA, C( N1+1 ), N )
+ $ BETA, C( N1+1 ), N )
CALL DGEMM( 'T', 'N', N1, N2, K, ALPHA, A( 1, 1 ),
- + LDA, A( 1, N2 ), LDA, BETA, C( 1 ), N )
+ $ LDA, A( 1, N2 ), LDA, BETA, C( 1 ), N )
*
END IF
*
@@ -273,24 +272,24 @@
* N is odd, TRANSR = 'T', UPLO = 'L', and TRANS = 'N'
*
CALL DSYRK( 'U', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N1 )
+ $ BETA, C( 1 ), N1 )
CALL DSYRK( 'L', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( 2 ), N1 )
+ $ BETA, C( 2 ), N1 )
CALL DGEMM( 'N', 'T', N1, N2, K, ALPHA, A( 1, 1 ),
- + LDA, A( N1+1, 1 ), LDA, BETA,
- + C( N1*N1+1 ), N1 )
+ $ LDA, A( N1+1, 1 ), LDA, BETA,
+ $ C( N1*N1+1 ), N1 )
*
ELSE
*
* N is odd, TRANSR = 'T', UPLO = 'L', and TRANS = 'T'
*
CALL DSYRK( 'U', 'T', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N1 )
+ $ BETA, C( 1 ), N1 )
CALL DSYRK( 'L', 'T', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( 2 ), N1 )
+ $ BETA, C( 2 ), N1 )
CALL DGEMM( 'T', 'N', N1, N2, K, ALPHA, A( 1, 1 ),
- + LDA, A( 1, N1+1 ), LDA, BETA,
- + C( N1*N1+1 ), N1 )
+ $ LDA, A( 1, N1+1 ), LDA, BETA,
+ $ C( N1*N1+1 ), N1 )
*
END IF
*
@@ -303,22 +302,22 @@
* N is odd, TRANSR = 'T', UPLO = 'U', and TRANS = 'N'
*
CALL DSYRK( 'U', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2*N2+1 ), N2 )
+ $ BETA, C( N2*N2+1 ), N2 )
CALL DSYRK( 'L', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( N1*N2+1 ), N2 )
+ $ BETA, C( N1*N2+1 ), N2 )
CALL DGEMM( 'N', 'T', N2, N1, K, ALPHA, A( N1+1, 1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( 1 ), N2 )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( 1 ), N2 )
*
ELSE
*
* N is odd, TRANSR = 'T', UPLO = 'U', and TRANS = 'T'
*
CALL DSYRK( 'U', 'T', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2*N2+1 ), N2 )
+ $ BETA, C( N2*N2+1 ), N2 )
CALL DSYRK( 'L', 'T', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( N1*N2+1 ), N2 )
+ $ BETA, C( N1*N2+1 ), N2 )
CALL DGEMM( 'T', 'N', N2, N1, K, ALPHA, A( 1, N1+1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( 1 ), N2 )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( 1 ), N2 )
*
END IF
*
@@ -343,24 +342,24 @@
* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'N'
*
CALL DSYRK( 'L', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 2 ), N+1 )
+ $ BETA, C( 2 ), N+1 )
CALL DSYRK( 'U', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( 1 ), N+1 )
+ $ BETA, C( 1 ), N+1 )
CALL DGEMM( 'N', 'T', NK, NK, K, ALPHA, A( NK+1, 1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( NK+2 ),
- + N+1 )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( NK+2 ),
+ $ N+1 )
*
ELSE
*
* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'T'
*
CALL DSYRK( 'L', 'T', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 2 ), N+1 )
+ $ BETA, C( 2 ), N+1 )
CALL DSYRK( 'U', 'T', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( 1 ), N+1 )
+ $ BETA, C( 1 ), N+1 )
CALL DGEMM( 'T', 'N', NK, NK, K, ALPHA, A( 1, NK+1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( NK+2 ),
- + N+1 )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( NK+2 ),
+ $ N+1 )
*
END IF
*
@@ -373,24 +372,24 @@
* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'N'
*
CALL DSYRK( 'L', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+2 ), N+1 )
+ $ BETA, C( NK+2 ), N+1 )
CALL DSYRK( 'U', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( NK+1 ), N+1 )
+ $ BETA, C( NK+1 ), N+1 )
CALL DGEMM( 'N', 'T', NK, NK, K, ALPHA, A( 1, 1 ),
- + LDA, A( NK+1, 1 ), LDA, BETA, C( 1 ),
- + N+1 )
+ $ LDA, A( NK+1, 1 ), LDA, BETA, C( 1 ),
+ $ N+1 )
*
ELSE
*
* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'T'
*
CALL DSYRK( 'L', 'T', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+2 ), N+1 )
+ $ BETA, C( NK+2 ), N+1 )
CALL DSYRK( 'U', 'T', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( NK+1 ), N+1 )
+ $ BETA, C( NK+1 ), N+1 )
CALL DGEMM( 'T', 'N', NK, NK, K, ALPHA, A( 1, 1 ),
- + LDA, A( 1, NK+1 ), LDA, BETA, C( 1 ),
- + N+1 )
+ $ LDA, A( 1, NK+1 ), LDA, BETA, C( 1 ),
+ $ N+1 )
*
END IF
*
@@ -409,24 +408,24 @@
* N is even, TRANSR = 'T', UPLO = 'L', and TRANS = 'N'
*
CALL DSYRK( 'U', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+1 ), NK )
+ $ BETA, C( NK+1 ), NK )
CALL DSYRK( 'L', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( 1 ), NK )
+ $ BETA, C( 1 ), NK )
CALL DGEMM( 'N', 'T', NK, NK, K, ALPHA, A( 1, 1 ),
- + LDA, A( NK+1, 1 ), LDA, BETA,
- + C( ( ( NK+1 )*NK )+1 ), NK )
+ $ LDA, A( NK+1, 1 ), LDA, BETA,
+ $ C( ( ( NK+1 )*NK )+1 ), NK )
*
ELSE
*
* N is even, TRANSR = 'T', UPLO = 'L', and TRANS = 'T'
*
CALL DSYRK( 'U', 'T', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+1 ), NK )
+ $ BETA, C( NK+1 ), NK )
CALL DSYRK( 'L', 'T', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( 1 ), NK )
+ $ BETA, C( 1 ), NK )
CALL DGEMM( 'T', 'N', NK, NK, K, ALPHA, A( 1, 1 ),
- + LDA, A( 1, NK+1 ), LDA, BETA,
- + C( ( ( NK+1 )*NK )+1 ), NK )
+ $ LDA, A( 1, NK+1 ), LDA, BETA,
+ $ C( ( ( NK+1 )*NK )+1 ), NK )
*
END IF
*
@@ -439,22 +438,22 @@
* N is even, TRANSR = 'T', UPLO = 'U', and TRANS = 'N'
*
CALL DSYRK( 'U', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK*( NK+1 )+1 ), NK )
+ $ BETA, C( NK*( NK+1 )+1 ), NK )
CALL DSYRK( 'L', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( NK*NK+1 ), NK )
+ $ BETA, C( NK*NK+1 ), NK )
CALL DGEMM( 'N', 'T', NK, NK, K, ALPHA, A( NK+1, 1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( 1 ), NK )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( 1 ), NK )
*
ELSE
*
* N is even, TRANSR = 'T', UPLO = 'U', and TRANS = 'T'
*
CALL DSYRK( 'U', 'T', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK*( NK+1 )+1 ), NK )
+ $ BETA, C( NK*( NK+1 )+1 ), NK )
CALL DSYRK( 'L', 'T', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( NK*NK+1 ), NK )
+ $ BETA, C( NK*NK+1 ), NK )
CALL DGEMM( 'T', 'N', NK, NK, K, ALPHA, A( 1, NK+1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( 1 ), NK )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( 1 ), NK )
*
END IF
*
diff --git a/SRC/dsgesv.f b/SRC/dsgesv.f
index 49b73daf..bc6b3ce5 100644
--- a/SRC/dsgesv.f
+++ b/SRC/dsgesv.f
@@ -1,5 +1,5 @@
SUBROUTINE DSGESV( N, NRHS, A, LDA, IPIV, B, LDB, X, LDX, WORK,
- + SWORK, ITER, INFO )
+ $ SWORK, ITER, INFO )
*
* -- LAPACK PROTOTYPE driver routine (version 3.2.2) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
@@ -14,7 +14,7 @@
INTEGER IPIV( * )
REAL SWORK( * )
DOUBLE PRECISION A( LDA, * ), B( LDB, * ), WORK( N, * ),
- + X( LDX, * )
+ $ X( LDX, * )
* ..
*
* Purpose
@@ -122,7 +122,7 @@
* but the factor U is exactly singular, so the solution
* could not be computed.
*
-* =========
+* =====================================================================
*
* .. Parameters ..
LOGICAL DOITREF
@@ -143,7 +143,7 @@
*
* .. External Subroutines ..
EXTERNAL DAXPY, DGEMM, DLACPY, DLAG2S, SLAG2D, SGETRF,
- + SGETRS, XERBLA
+ $ SGETRS, XERBLA
* ..
* .. External Functions ..
INTEGER IDAMAX
@@ -179,7 +179,7 @@
* Quick return if (N.EQ.0).
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* Skip single precision iterative refinement if a priori slower
* than double precision factorization.
@@ -232,7 +232,7 @@
* Solve the system SA*SX = SB.
*
CALL SGETRS( 'No transpose', N, NRHS, SWORK( PTSA ), N, IPIV,
- + SWORK( PTSX ), N, INFO )
+ $ SWORK( PTSX ), N, INFO )
*
* Convert SX back to double precision
*
@@ -243,7 +243,7 @@
CALL DLACPY( 'All', N, NRHS, B, LDB, WORK, N )
*
CALL DGEMM( 'No Transpose', 'No Transpose', N, NRHS, N, NEGONE, A,
- + LDA, X, LDX, ONE, WORK, N )
+ $ LDA, X, LDX, ONE, WORK, N )
*
* Check whether the NRHS normwise backward errors satisfy the
* stopping criterion. If yes, set ITER=0 and return.
@@ -252,7 +252,7 @@
XNRM = ABS( X( IDAMAX( N, X( 1, I ), 1 ), I ) )
RNRM = ABS( WORK( IDAMAX( N, WORK( 1, I ), 1 ), I ) )
IF( RNRM.GT.XNRM*CTE )
- + GO TO 10
+ $ GO TO 10
END DO
*
* If we are here, the NRHS normwise backward errors satisfy the
@@ -278,7 +278,7 @@
* Solve the system SA*SX = SR.
*
CALL SGETRS( 'No transpose', N, NRHS, SWORK( PTSA ), N, IPIV,
- + SWORK( PTSX ), N, INFO )
+ $ SWORK( PTSX ), N, INFO )
*
* Convert SX back to double precision and update the current
* iterate.
@@ -294,7 +294,7 @@
CALL DLACPY( 'All', N, NRHS, B, LDB, WORK, N )
*
CALL DGEMM( 'No Transpose', 'No Transpose', N, NRHS, N, NEGONE,
- + A, LDA, X, LDX, ONE, WORK, N )
+ $ A, LDA, X, LDX, ONE, WORK, N )
*
* Check whether the NRHS normwise backward errors satisfy the
* stopping criterion. If yes, set ITER=IITER>0 and return.
@@ -303,7 +303,7 @@
XNRM = ABS( X( IDAMAX( N, X( 1, I ), 1 ), I ) )
RNRM = ABS( WORK( IDAMAX( N, WORK( 1, I ), 1 ), I ) )
IF( RNRM.GT.XNRM*CTE )
- + GO TO 20
+ $ GO TO 20
END DO
*
* If we are here, the NRHS normwise backward errors satisfy the
@@ -332,11 +332,11 @@
CALL DGETRF( N, N, A, LDA, IPIV, INFO )
*
IF( INFO.NE.0 )
- + RETURN
+ $ RETURN
*
CALL DLACPY( 'All', N, NRHS, B, LDB, X, LDX )
CALL DGETRS( 'No transpose', N, NRHS, A, LDA, IPIV, X, LDX,
- + INFO )
+ $ INFO )
*
RETURN
*
diff --git a/SRC/dsposv.f b/SRC/dsposv.f
index 1a40b123..e85bb44e 100644
--- a/SRC/dsposv.f
+++ b/SRC/dsposv.f
@@ -1,5 +1,5 @@
SUBROUTINE DSPOSV( UPLO, N, NRHS, A, LDA, B, LDB, X, LDX, WORK,
- + SWORK, ITER, INFO )
+ $ SWORK, ITER, INFO )
*
* -- LAPACK PROTOTYPE driver routine (version 3.3.0) --
* Univ. of Tennessee, Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..
@@ -13,7 +13,7 @@
* .. Array Arguments ..
REAL SWORK( * )
DOUBLE PRECISION A( LDA, * ), B( LDB, * ), WORK( N, * ),
- + X( LDX, * )
+ $ X( LDX, * )
* ..
*
* Purpose
@@ -126,7 +126,7 @@
* factorization could not be completed, and the solution
* has not been computed.
*
-* =========
+* =====================================================================
*
* .. Parameters ..
LOGICAL DOITREF
@@ -147,7 +147,7 @@
*
* .. External Subroutines ..
EXTERNAL DAXPY, DSYMM, DLACPY, DLAT2S, DLAG2S, SLAG2D,
- + SPOTRF, SPOTRS, XERBLA
+ $ SPOTRF, SPOTRS, XERBLA
* ..
* .. External Functions ..
INTEGER IDAMAX
@@ -186,7 +186,7 @@
* Quick return if (N.EQ.0).
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* Skip single precision iterative refinement if a priori slower
* than double precision factorization.
@@ -239,7 +239,7 @@
* Solve the system SA*SX = SB.
*
CALL SPOTRS( UPLO, N, NRHS, SWORK( PTSA ), N, SWORK( PTSX ), N,
- + INFO )
+ $ INFO )
*
* Convert SX back to double precision
*
@@ -250,7 +250,7 @@
CALL DLACPY( 'All', N, NRHS, B, LDB, WORK, N )
*
CALL DSYMM( 'Left', UPLO, N, NRHS, NEGONE, A, LDA, X, LDX, ONE,
- + WORK, N )
+ $ WORK, N )
*
* Check whether the NRHS normwise backward errors satisfy the
* stopping criterion. If yes, set ITER=0 and return.
@@ -259,7 +259,7 @@
XNRM = ABS( X( IDAMAX( N, X( 1, I ), 1 ), I ) )
RNRM = ABS( WORK( IDAMAX( N, WORK( 1, I ), 1 ), I ) )
IF( RNRM.GT.XNRM*CTE )
- + GO TO 10
+ $ GO TO 10
END DO
*
* If we are here, the NRHS normwise backward errors satisfy the
@@ -285,7 +285,7 @@
* Solve the system SA*SX = SR.
*
CALL SPOTRS( UPLO, N, NRHS, SWORK( PTSA ), N, SWORK( PTSX ), N,
- + INFO )
+ $ INFO )
*
* Convert SX back to double precision and update the current
* iterate.
@@ -301,7 +301,7 @@
CALL DLACPY( 'All', N, NRHS, B, LDB, WORK, N )
*
CALL DSYMM( 'L', UPLO, N, NRHS, NEGONE, A, LDA, X, LDX, ONE,
- + WORK, N )
+ $ WORK, N )
*
* Check whether the NRHS normwise backward errors satisfy the
* stopping criterion. If yes, set ITER=IITER>0 and return.
@@ -310,7 +310,7 @@
XNRM = ABS( X( IDAMAX( N, X( 1, I ), 1 ), I ) )
RNRM = ABS( WORK( IDAMAX( N, WORK( 1, I ), 1 ), I ) )
IF( RNRM.GT.XNRM*CTE )
- + GO TO 20
+ $ GO TO 20
END DO
*
* If we are here, the NRHS normwise backward errors satisfy the
@@ -339,7 +339,7 @@
CALL DPOTRF( UPLO, N, A, LDA, INFO )
*
IF( INFO.NE.0 )
- + RETURN
+ $ RETURN
*
CALL DLACPY( 'All', N, NRHS, B, LDB, X, LDX )
CALL DPOTRS( UPLO, N, NRHS, A, LDA, X, LDX, INFO )
diff --git a/SRC/dtfsm.f b/SRC/dtfsm.f
index 5df27217..52088b56 100644
--- a/SRC/dtfsm.f
+++ b/SRC/dtfsm.f
@@ -1,5 +1,5 @@
SUBROUTINE DTFSM( TRANSR, SIDE, UPLO, TRANS, DIAG, M, N, ALPHA, A,
- + B, LDB )
+ $ B, LDB )
*
* -- LAPACK routine (version 3.3.0) --
*
@@ -222,7 +222,7 @@
* ..
* .. Local Scalars ..
LOGICAL LOWER, LSIDE, MISODD, NISODD, NORMALTRANSR,
- + NOTRANS
+ $ NOTRANS
INTEGER M1, M2, N1, N2, K, INFO, I, J
* ..
* .. External Functions ..
@@ -253,7 +253,7 @@
ELSE IF( .NOT.NOTRANS .AND. .NOT.LSAME( TRANS, 'T' ) ) THEN
INFO = -4
ELSE IF( .NOT.LSAME( DIAG, 'N' ) .AND. .NOT.LSAME( DIAG, 'U' ) )
- + THEN
+ $ THEN
INFO = -5
ELSE IF( M.LT.0 ) THEN
INFO = -6
@@ -270,7 +270,7 @@
* Quick return when ( (N.EQ.0).OR.(M.EQ.0) )
*
IF( ( M.EQ.0 ) .OR. ( N.EQ.0 ) )
- + RETURN
+ $ RETURN
*
* Quick return when ALPHA.EQ.(0D+0)
*
@@ -325,14 +325,14 @@
*
IF( M.EQ.1 ) THEN
CALL DTRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A, M, B, LDB )
+ $ A, M, B, LDB )
ELSE
CALL DTRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
CALL DGEMM( 'N', 'N', M2, N, M1, -ONE, A( M1 ),
- + M, B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ M, B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL DTRSM( 'L', 'U', 'T', DIAG, M2, N, ONE,
- + A( M ), M, B( M1, 0 ), LDB )
+ $ A( M ), M, B( M1, 0 ), LDB )
END IF
*
ELSE
@@ -342,14 +342,14 @@
*
IF( M.EQ.1 ) THEN
CALL DTRSM( 'L', 'L', 'T', DIAG, M1, N, ALPHA,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
ELSE
CALL DTRSM( 'L', 'U', 'N', DIAG, M2, N, ALPHA,
- + A( M ), M, B( M1, 0 ), LDB )
+ $ A( M ), M, B( M1, 0 ), LDB )
CALL DGEMM( 'T', 'N', M1, N, M2, -ONE, A( M1 ),
- + M, B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ M, B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL DTRSM( 'L', 'L', 'T', DIAG, M1, N, ONE,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
END IF
*
END IF
@@ -364,11 +364,11 @@
* TRANS = 'N'
*
CALL DTRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A( M2 ), M, B, LDB )
+ $ A( M2 ), M, B, LDB )
CALL DGEMM( 'T', 'N', M2, N, M1, -ONE, A( 0 ), M,
- + B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL DTRSM( 'L', 'U', 'T', DIAG, M2, N, ONE,
- + A( M1 ), M, B( M1, 0 ), LDB )
+ $ A( M1 ), M, B( M1, 0 ), LDB )
*
ELSE
*
@@ -376,11 +376,11 @@
* TRANS = 'T'
*
CALL DTRSM( 'L', 'U', 'N', DIAG, M2, N, ALPHA,
- + A( M1 ), M, B( M1, 0 ), LDB )
+ $ A( M1 ), M, B( M1, 0 ), LDB )
CALL DGEMM( 'N', 'N', M1, N, M2, -ONE, A( 0 ), M,
- + B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL DTRSM( 'L', 'L', 'T', DIAG, M1, N, ONE,
- + A( M2 ), M, B, LDB )
+ $ A( M2 ), M, B, LDB )
*
END IF
*
@@ -401,15 +401,15 @@
*
IF( M.EQ.1 ) THEN
CALL DTRSM( 'L', 'U', 'T', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
ELSE
CALL DTRSM( 'L', 'U', 'T', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
CALL DGEMM( 'T', 'N', M2, N, M1, -ONE,
- + A( M1*M1 ), M1, B, LDB, ALPHA,
- + B( M1, 0 ), LDB )
+ $ A( M1*M1 ), M1, B, LDB, ALPHA,
+ $ B( M1, 0 ), LDB )
CALL DTRSM( 'L', 'L', 'N', DIAG, M2, N, ONE,
- + A( 1 ), M1, B( M1, 0 ), LDB )
+ $ A( 1 ), M1, B( M1, 0 ), LDB )
END IF
*
ELSE
@@ -419,15 +419,15 @@
*
IF( M.EQ.1 ) THEN
CALL DTRSM( 'L', 'U', 'N', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
ELSE
CALL DTRSM( 'L', 'L', 'T', DIAG, M2, N, ALPHA,
- + A( 1 ), M1, B( M1, 0 ), LDB )
+ $ A( 1 ), M1, B( M1, 0 ), LDB )
CALL DGEMM( 'N', 'N', M1, N, M2, -ONE,
- + A( M1*M1 ), M1, B( M1, 0 ), LDB,
- + ALPHA, B, LDB )
+ $ A( M1*M1 ), M1, B( M1, 0 ), LDB,
+ $ ALPHA, B, LDB )
CALL DTRSM( 'L', 'U', 'N', DIAG, M1, N, ONE,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
END IF
*
END IF
@@ -442,11 +442,11 @@
* TRANS = 'N'
*
CALL DTRSM( 'L', 'U', 'T', DIAG, M1, N, ALPHA,
- + A( M2*M2 ), M2, B, LDB )
+ $ A( M2*M2 ), M2, B, LDB )
CALL DGEMM( 'N', 'N', M2, N, M1, -ONE, A( 0 ), M2,
- + B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL DTRSM( 'L', 'L', 'N', DIAG, M2, N, ONE,
- + A( M1*M2 ), M2, B( M1, 0 ), LDB )
+ $ A( M1*M2 ), M2, B( M1, 0 ), LDB )
*
ELSE
*
@@ -454,11 +454,11 @@
* TRANS = 'T'
*
CALL DTRSM( 'L', 'L', 'T', DIAG, M2, N, ALPHA,
- + A( M1*M2 ), M2, B( M1, 0 ), LDB )
+ $ A( M1*M2 ), M2, B( M1, 0 ), LDB )
CALL DGEMM( 'T', 'N', M1, N, M2, -ONE, A( 0 ), M2,
- + B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL DTRSM( 'L', 'U', 'N', DIAG, M1, N, ONE,
- + A( M2*M2 ), M2, B, LDB )
+ $ A( M2*M2 ), M2, B, LDB )
*
END IF
*
@@ -484,11 +484,11 @@
* and TRANS = 'N'
*
CALL DTRSM( 'L', 'L', 'N', DIAG, K, N, ALPHA,
- + A( 1 ), M+1, B, LDB )
+ $ A( 1 ), M+1, B, LDB )
CALL DGEMM( 'N', 'N', K, N, K, -ONE, A( K+1 ),
- + M+1, B, LDB, ALPHA, B( K, 0 ), LDB )
+ $ M+1, B, LDB, ALPHA, B( K, 0 ), LDB )
CALL DTRSM( 'L', 'U', 'T', DIAG, K, N, ONE,
- + A( 0 ), M+1, B( K, 0 ), LDB )
+ $ A( 0 ), M+1, B( K, 0 ), LDB )
*
ELSE
*
@@ -496,11 +496,11 @@
* and TRANS = 'T'
*
CALL DTRSM( 'L', 'U', 'N', DIAG, K, N, ALPHA,
- + A( 0 ), M+1, B( K, 0 ), LDB )
+ $ A( 0 ), M+1, B( K, 0 ), LDB )
CALL DGEMM( 'T', 'N', K, N, K, -ONE, A( K+1 ),
- + M+1, B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ M+1, B( K, 0 ), LDB, ALPHA, B, LDB )
CALL DTRSM( 'L', 'L', 'T', DIAG, K, N, ONE,
- + A( 1 ), M+1, B, LDB )
+ $ A( 1 ), M+1, B, LDB )
*
END IF
*
@@ -514,22 +514,22 @@
* and TRANS = 'N'
*
CALL DTRSM( 'L', 'L', 'N', DIAG, K, N, ALPHA,
- + A( K+1 ), M+1, B, LDB )
+ $ A( K+1 ), M+1, B, LDB )
CALL DGEMM( 'T', 'N', K, N, K, -ONE, A( 0 ), M+1,
- + B, LDB, ALPHA, B( K, 0 ), LDB )
+ $ B, LDB, ALPHA, B( K, 0 ), LDB )
CALL DTRSM( 'L', 'U', 'T', DIAG, K, N, ONE,
- + A( K ), M+1, B( K, 0 ), LDB )
+ $ A( K ), M+1, B( K, 0 ), LDB )
*
ELSE
*
* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U',
* and TRANS = 'T'
CALL DTRSM( 'L', 'U', 'N', DIAG, K, N, ALPHA,
- + A( K ), M+1, B( K, 0 ), LDB )
+ $ A( K ), M+1, B( K, 0 ), LDB )
CALL DGEMM( 'N', 'N', K, N, K, -ONE, A( 0 ), M+1,
- + B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ B( K, 0 ), LDB, ALPHA, B, LDB )
CALL DTRSM( 'L', 'L', 'T', DIAG, K, N, ONE,
- + A( K+1 ), M+1, B, LDB )
+ $ A( K+1 ), M+1, B, LDB )
*
END IF
*
@@ -549,12 +549,12 @@
* and TRANS = 'N'
*
CALL DTRSM( 'L', 'U', 'T', DIAG, K, N, ALPHA,
- + A( K ), K, B, LDB )
+ $ A( K ), K, B, LDB )
CALL DGEMM( 'T', 'N', K, N, K, -ONE,
- + A( K*( K+1 ) ), K, B, LDB, ALPHA,
- + B( K, 0 ), LDB )
+ $ A( K*( K+1 ) ), K, B, LDB, ALPHA,
+ $ B( K, 0 ), LDB )
CALL DTRSM( 'L', 'L', 'N', DIAG, K, N, ONE,
- + A( 0 ), K, B( K, 0 ), LDB )
+ $ A( 0 ), K, B( K, 0 ), LDB )
*
ELSE
*
@@ -562,12 +562,12 @@
* and TRANS = 'T'
*
CALL DTRSM( 'L', 'L', 'T', DIAG, K, N, ALPHA,
- + A( 0 ), K, B( K, 0 ), LDB )
+ $ A( 0 ), K, B( K, 0 ), LDB )
CALL DGEMM( 'N', 'N', K, N, K, -ONE,
- + A( K*( K+1 ) ), K, B( K, 0 ), LDB,
- + ALPHA, B, LDB )
+ $ A( K*( K+1 ) ), K, B( K, 0 ), LDB,
+ $ ALPHA, B, LDB )
CALL DTRSM( 'L', 'U', 'N', DIAG, K, N, ONE,
- + A( K ), K, B, LDB )
+ $ A( K ), K, B, LDB )
*
END IF
*
@@ -581,11 +581,11 @@
* and TRANS = 'N'
*
CALL DTRSM( 'L', 'U', 'T', DIAG, K, N, ALPHA,
- + A( K*( K+1 ) ), K, B, LDB )
+ $ A( K*( K+1 ) ), K, B, LDB )
CALL DGEMM( 'N', 'N', K, N, K, -ONE, A( 0 ), K, B,
- + LDB, ALPHA, B( K, 0 ), LDB )
+ $ LDB, ALPHA, B( K, 0 ), LDB )
CALL DTRSM( 'L', 'L', 'N', DIAG, K, N, ONE,
- + A( K*K ), K, B( K, 0 ), LDB )
+ $ A( K*K ), K, B( K, 0 ), LDB )
*
ELSE
*
@@ -593,11 +593,11 @@
* and TRANS = 'T'
*
CALL DTRSM( 'L', 'L', 'T', DIAG, K, N, ALPHA,
- + A( K*K ), K, B( K, 0 ), LDB )
+ $ A( K*K ), K, B( K, 0 ), LDB )
CALL DGEMM( 'T', 'N', K, N, K, -ONE, A( 0 ), K,
- + B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ B( K, 0 ), LDB, ALPHA, B, LDB )
CALL DTRSM( 'L', 'U', 'N', DIAG, K, N, ONE,
- + A( K*( K+1 ) ), K, B, LDB )
+ $ A( K*( K+1 ) ), K, B, LDB )
*
END IF
*
@@ -647,12 +647,12 @@
* TRANS = 'N'
*
CALL DTRSM( 'R', 'U', 'T', DIAG, M, N2, ALPHA,
- + A( N ), N, B( 0, N1 ), LDB )
+ $ A( N ), N, B( 0, N1 ), LDB )
CALL DGEMM( 'N', 'N', M, N1, N2, -ONE, B( 0, N1 ),
- + LDB, A( N1 ), N, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( N1 ), N, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL DTRSM( 'R', 'L', 'N', DIAG, M, N1, ONE,
- + A( 0 ), N, B( 0, 0 ), LDB )
+ $ A( 0 ), N, B( 0, 0 ), LDB )
*
ELSE
*
@@ -660,12 +660,12 @@
* TRANS = 'T'
*
CALL DTRSM( 'R', 'L', 'T', DIAG, M, N1, ALPHA,
- + A( 0 ), N, B( 0, 0 ), LDB )
+ $ A( 0 ), N, B( 0, 0 ), LDB )
CALL DGEMM( 'N', 'T', M, N2, N1, -ONE, B( 0, 0 ),
- + LDB, A( N1 ), N, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( N1 ), N, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL DTRSM( 'R', 'U', 'N', DIAG, M, N2, ONE,
- + A( N ), N, B( 0, N1 ), LDB )
+ $ A( N ), N, B( 0, N1 ), LDB )
*
END IF
*
@@ -679,12 +679,12 @@
* TRANS = 'N'
*
CALL DTRSM( 'R', 'L', 'T', DIAG, M, N1, ALPHA,
- + A( N2 ), N, B( 0, 0 ), LDB )
+ $ A( N2 ), N, B( 0, 0 ), LDB )
CALL DGEMM( 'N', 'N', M, N2, N1, -ONE, B( 0, 0 ),
- + LDB, A( 0 ), N, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( 0 ), N, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL DTRSM( 'R', 'U', 'N', DIAG, M, N2, ONE,
- + A( N1 ), N, B( 0, N1 ), LDB )
+ $ A( N1 ), N, B( 0, N1 ), LDB )
*
ELSE
*
@@ -692,11 +692,11 @@
* TRANS = 'T'
*
CALL DTRSM( 'R', 'U', 'T', DIAG, M, N2, ALPHA,
- + A( N1 ), N, B( 0, N1 ), LDB )
+ $ A( N1 ), N, B( 0, N1 ), LDB )
CALL DGEMM( 'N', 'T', M, N1, N2, -ONE, B( 0, N1 ),
- + LDB, A( 0 ), N, ALPHA, B( 0, 0 ), LDB )
+ $ LDB, A( 0 ), N, ALPHA, B( 0, 0 ), LDB )
CALL DTRSM( 'R', 'L', 'N', DIAG, M, N1, ONE,
- + A( N2 ), N, B( 0, 0 ), LDB )
+ $ A( N2 ), N, B( 0, 0 ), LDB )
*
END IF
*
@@ -716,12 +716,12 @@
* TRANS = 'N'
*
CALL DTRSM( 'R', 'L', 'N', DIAG, M, N2, ALPHA,
- + A( 1 ), N1, B( 0, N1 ), LDB )
+ $ A( 1 ), N1, B( 0, N1 ), LDB )
CALL DGEMM( 'N', 'T', M, N1, N2, -ONE, B( 0, N1 ),
- + LDB, A( N1*N1 ), N1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( N1*N1 ), N1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL DTRSM( 'R', 'U', 'T', DIAG, M, N1, ONE,
- + A( 0 ), N1, B( 0, 0 ), LDB )
+ $ A( 0 ), N1, B( 0, 0 ), LDB )
*
ELSE
*
@@ -729,12 +729,12 @@
* TRANS = 'T'
*
CALL DTRSM( 'R', 'U', 'N', DIAG, M, N1, ALPHA,
- + A( 0 ), N1, B( 0, 0 ), LDB )
+ $ A( 0 ), N1, B( 0, 0 ), LDB )
CALL DGEMM( 'N', 'N', M, N2, N1, -ONE, B( 0, 0 ),
- + LDB, A( N1*N1 ), N1, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( N1*N1 ), N1, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL DTRSM( 'R', 'L', 'T', DIAG, M, N2, ONE,
- + A( 1 ), N1, B( 0, N1 ), LDB )
+ $ A( 1 ), N1, B( 0, N1 ), LDB )
*
END IF
*
@@ -748,12 +748,12 @@
* TRANS = 'N'
*
CALL DTRSM( 'R', 'U', 'N', DIAG, M, N1, ALPHA,
- + A( N2*N2 ), N2, B( 0, 0 ), LDB )
+ $ A( N2*N2 ), N2, B( 0, 0 ), LDB )
CALL DGEMM( 'N', 'T', M, N2, N1, -ONE, B( 0, 0 ),
- + LDB, A( 0 ), N2, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( 0 ), N2, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL DTRSM( 'R', 'L', 'T', DIAG, M, N2, ONE,
- + A( N1*N2 ), N2, B( 0, N1 ), LDB )
+ $ A( N1*N2 ), N2, B( 0, N1 ), LDB )
*
ELSE
*
@@ -761,12 +761,12 @@
* TRANS = 'T'
*
CALL DTRSM( 'R', 'L', 'N', DIAG, M, N2, ALPHA,
- + A( N1*N2 ), N2, B( 0, N1 ), LDB )
+ $ A( N1*N2 ), N2, B( 0, N1 ), LDB )
CALL DGEMM( 'N', 'N', M, N1, N2, -ONE, B( 0, N1 ),
- + LDB, A( 0 ), N2, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( 0 ), N2, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL DTRSM( 'R', 'U', 'T', DIAG, M, N1, ONE,
- + A( N2*N2 ), N2, B( 0, 0 ), LDB )
+ $ A( N2*N2 ), N2, B( 0, 0 ), LDB )
*
END IF
*
@@ -792,12 +792,12 @@
* and TRANS = 'N'
*
CALL DTRSM( 'R', 'U', 'T', DIAG, M, K, ALPHA,
- + A( 0 ), N+1, B( 0, K ), LDB )
+ $ A( 0 ), N+1, B( 0, K ), LDB )
CALL DGEMM( 'N', 'N', M, K, K, -ONE, B( 0, K ),
- + LDB, A( K+1 ), N+1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( K+1 ), N+1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL DTRSM( 'R', 'L', 'N', DIAG, M, K, ONE,
- + A( 1 ), N+1, B( 0, 0 ), LDB )
+ $ A( 1 ), N+1, B( 0, 0 ), LDB )
*
ELSE
*
@@ -805,12 +805,12 @@
* and TRANS = 'T'
*
CALL DTRSM( 'R', 'L', 'T', DIAG, M, K, ALPHA,
- + A( 1 ), N+1, B( 0, 0 ), LDB )
+ $ A( 1 ), N+1, B( 0, 0 ), LDB )
CALL DGEMM( 'N', 'T', M, K, K, -ONE, B( 0, 0 ),
- + LDB, A( K+1 ), N+1, ALPHA, B( 0, K ),
- + LDB )
+ $ LDB, A( K+1 ), N+1, ALPHA, B( 0, K ),
+ $ LDB )
CALL DTRSM( 'R', 'U', 'N', DIAG, M, K, ONE,
- + A( 0 ), N+1, B( 0, K ), LDB )
+ $ A( 0 ), N+1, B( 0, K ), LDB )
*
END IF
*
@@ -824,12 +824,12 @@
* and TRANS = 'N'
*
CALL DTRSM( 'R', 'L', 'T', DIAG, M, K, ALPHA,
- + A( K+1 ), N+1, B( 0, 0 ), LDB )
+ $ A( K+1 ), N+1, B( 0, 0 ), LDB )
CALL DGEMM( 'N', 'N', M, K, K, -ONE, B( 0, 0 ),
- + LDB, A( 0 ), N+1, ALPHA, B( 0, K ),
- + LDB )
+ $ LDB, A( 0 ), N+1, ALPHA, B( 0, K ),
+ $ LDB )
CALL DTRSM( 'R', 'U', 'N', DIAG, M, K, ONE,
- + A( K ), N+1, B( 0, K ), LDB )
+ $ A( K ), N+1, B( 0, K ), LDB )
*
ELSE
*
@@ -837,12 +837,12 @@
* and TRANS = 'T'
*
CALL DTRSM( 'R', 'U', 'T', DIAG, M, K, ALPHA,
- + A( K ), N+1, B( 0, K ), LDB )
+ $ A( K ), N+1, B( 0, K ), LDB )
CALL DGEMM( 'N', 'T', M, K, K, -ONE, B( 0, K ),
- + LDB, A( 0 ), N+1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( 0 ), N+1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL DTRSM( 'R', 'L', 'N', DIAG, M, K, ONE,
- + A( K+1 ), N+1, B( 0, 0 ), LDB )
+ $ A( K+1 ), N+1, B( 0, 0 ), LDB )
*
END IF
*
@@ -862,12 +862,12 @@
* and TRANS = 'N'
*
CALL DTRSM( 'R', 'L', 'N', DIAG, M, K, ALPHA,
- + A( 0 ), K, B( 0, K ), LDB )
+ $ A( 0 ), K, B( 0, K ), LDB )
CALL DGEMM( 'N', 'T', M, K, K, -ONE, B( 0, K ),
- + LDB, A( ( K+1 )*K ), K, ALPHA,
- + B( 0, 0 ), LDB )
+ $ LDB, A( ( K+1 )*K ), K, ALPHA,
+ $ B( 0, 0 ), LDB )
CALL DTRSM( 'R', 'U', 'T', DIAG, M, K, ONE,
- + A( K ), K, B( 0, 0 ), LDB )
+ $ A( K ), K, B( 0, 0 ), LDB )
*
ELSE
*
@@ -875,12 +875,12 @@
* and TRANS = 'T'
*
CALL DTRSM( 'R', 'U', 'N', DIAG, M, K, ALPHA,
- + A( K ), K, B( 0, 0 ), LDB )
+ $ A( K ), K, B( 0, 0 ), LDB )
CALL DGEMM( 'N', 'N', M, K, K, -ONE, B( 0, 0 ),
- + LDB, A( ( K+1 )*K ), K, ALPHA,
- + B( 0, K ), LDB )
+ $ LDB, A( ( K+1 )*K ), K, ALPHA,
+ $ B( 0, K ), LDB )
CALL DTRSM( 'R', 'L', 'T', DIAG, M, K, ONE,
- + A( 0 ), K, B( 0, K ), LDB )
+ $ A( 0 ), K, B( 0, K ), LDB )
*
END IF
*
@@ -894,11 +894,11 @@
* and TRANS = 'N'
*
CALL DTRSM( 'R', 'U', 'N', DIAG, M, K, ALPHA,
- + A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
+ $ A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
CALL DGEMM( 'N', 'T', M, K, K, -ONE, B( 0, 0 ),
- + LDB, A( 0 ), K, ALPHA, B( 0, K ), LDB )
+ $ LDB, A( 0 ), K, ALPHA, B( 0, K ), LDB )
CALL DTRSM( 'R', 'L', 'T', DIAG, M, K, ONE,
- + A( K*K ), K, B( 0, K ), LDB )
+ $ A( K*K ), K, B( 0, K ), LDB )
*
ELSE
*
@@ -906,11 +906,11 @@
* and TRANS = 'T'
*
CALL DTRSM( 'R', 'L', 'N', DIAG, M, K, ALPHA,
- + A( K*K ), K, B( 0, K ), LDB )
+ $ A( K*K ), K, B( 0, K ), LDB )
CALL DGEMM( 'N', 'N', M, K, K, -ONE, B( 0, K ),
- + LDB, A( 0 ), K, ALPHA, B( 0, 0 ), LDB )
+ $ LDB, A( 0 ), K, ALPHA, B( 0, 0 ), LDB )
CALL DTRSM( 'R', 'U', 'T', DIAG, M, K, ONE,
- + A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
+ $ A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
*
END IF
*
diff --git a/SRC/dtftri.f b/SRC/dtftri.f
index 2d6bcf54..066bacec 100644
--- a/SRC/dtftri.f
+++ b/SRC/dtftri.f
@@ -181,7 +181,7 @@
ELSE IF( .NOT.LOWER .AND. .NOT.LSAME( UPLO, 'U' ) ) THEN
INFO = -2
ELSE IF( .NOT.LSAME( DIAG, 'N' ) .AND. .NOT.LSAME( DIAG, 'U' ) )
- + THEN
+ $ THEN
INFO = -3
ELSE IF( N.LT.0 ) THEN
INFO = -4
@@ -194,7 +194,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -235,16 +235,16 @@
*
CALL DTRTRI( 'L', DIAG, N1, A( 0 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'R', 'L', 'N', DIAG, N2, N1, -ONE, A( 0 ),
- + N, A( N1 ), N )
+ $ N, A( N1 ), N )
CALL DTRTRI( 'U', DIAG, N2, A( N ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'L', 'U', 'T', DIAG, N2, N1, ONE, A( N ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
*
ELSE
*
@@ -254,16 +254,16 @@
*
CALL DTRTRI( 'L', DIAG, N1, A( N2 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'L', 'L', 'T', DIAG, N1, N2, -ONE, A( N2 ),
- + N, A( 0 ), N )
+ $ N, A( 0 ), N )
CALL DTRTRI( 'U', DIAG, N2, A( N1 ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'R', 'U', 'N', DIAG, N1, N2, ONE, A( N1 ),
- + N, A( 0 ), N )
+ $ N, A( 0 ), N )
*
END IF
*
@@ -278,16 +278,16 @@
*
CALL DTRTRI( 'U', DIAG, N1, A( 0 ), N1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'L', 'U', 'N', DIAG, N1, N2, -ONE, A( 0 ),
- + N1, A( N1*N1 ), N1 )
+ $ N1, A( N1*N1 ), N1 )
CALL DTRTRI( 'L', DIAG, N2, A( 1 ), N1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'R', 'L', 'T', DIAG, N1, N2, ONE, A( 1 ),
- + N1, A( N1*N1 ), N1 )
+ $ N1, A( N1*N1 ), N1 )
*
ELSE
*
@@ -296,16 +296,16 @@
*
CALL DTRTRI( 'U', DIAG, N1, A( N2*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'R', 'U', 'T', DIAG, N2, N1, -ONE,
- + A( N2*N2 ), N2, A( 0 ), N2 )
+ $ A( N2*N2 ), N2, A( 0 ), N2 )
CALL DTRTRI( 'L', DIAG, N2, A( N1*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'L', 'L', 'N', DIAG, N2, N1, ONE,
- + A( N1*N2 ), N2, A( 0 ), N2 )
+ $ A( N1*N2 ), N2, A( 0 ), N2 )
END IF
*
END IF
@@ -326,16 +326,16 @@
*
CALL DTRTRI( 'L', DIAG, K, A( 1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'R', 'L', 'N', DIAG, K, K, -ONE, A( 1 ),
- + N+1, A( K+1 ), N+1 )
+ $ N+1, A( K+1 ), N+1 )
CALL DTRTRI( 'U', DIAG, K, A( 0 ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'L', 'U', 'T', DIAG, K, K, ONE, A( 0 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
*
ELSE
*
@@ -345,16 +345,16 @@
*
CALL DTRTRI( 'L', DIAG, K, A( K+1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'L', 'L', 'T', DIAG, K, K, -ONE, A( K+1 ),
- + N+1, A( 0 ), N+1 )
+ $ N+1, A( 0 ), N+1 )
CALL DTRTRI( 'U', DIAG, K, A( K ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'R', 'U', 'N', DIAG, K, K, ONE, A( K ), N+1,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
END IF
ELSE
*
@@ -368,16 +368,16 @@
*
CALL DTRTRI( 'U', DIAG, K, A( K ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'L', 'U', 'N', DIAG, K, K, -ONE, A( K ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL DTRTRI( 'L', DIAG, K, A( 0 ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'R', 'L', 'T', DIAG, K, K, ONE, A( 0 ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
ELSE
*
* SRPA for UPPER, TRANSPOSE and N is even (see paper)
@@ -386,16 +386,16 @@
*
CALL DTRTRI( 'U', DIAG, K, A( K*( K+1 ) ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'R', 'U', 'T', DIAG, K, K, -ONE,
- + A( K*( K+1 ) ), K, A( 0 ), K )
+ $ A( K*( K+1 ) ), K, A( 0 ), K )
CALL DTRTRI( 'L', DIAG, K, A( K*K ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL DTRMM( 'L', 'L', 'N', DIAG, K, K, ONE, A( K*K ), K,
- + A( 0 ), K )
+ $ A( 0 ), K )
END IF
END IF
END IF
diff --git a/SRC/dtfttp.f b/SRC/dtfttp.f
index acf895e7..00adb2e7 100644
--- a/SRC/dtfttp.f
+++ b/SRC/dtfttp.f
@@ -175,7 +175,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
IF( N.EQ.1 ) THEN
IF( NORMALTRANSR ) THEN
@@ -218,7 +218,7 @@
* ARF^C has lda rows and n+1-noe cols
*
IF( .NOT.NORMALTRANSR )
- + LDA = ( N+1 ) / 2
+ $ LDA = ( N+1 ) / 2
*
* start execution: there are eight cases
*
diff --git a/SRC/dtfttr.f b/SRC/dtfttr.f
index 7ec769f4..4cf87aad 100644
--- a/SRC/dtfttr.f
+++ b/SRC/dtfttr.f
@@ -214,11 +214,11 @@
K = N / 2
NISODD = .FALSE.
IF( .NOT.LOWER )
- + NP1X2 = N + N + 2
+ $ NP1X2 = N + N + 2
ELSE
NISODD = .TRUE.
IF( .NOT.LOWER )
- + NX2 = N + N
+ $ NX2 = N + N
END IF
*
IF( NISODD ) THEN
diff --git a/SRC/dtpttf.f b/SRC/dtpttf.f
index 1d58158f..8f2b85da 100644
--- a/SRC/dtpttf.f
+++ b/SRC/dtpttf.f
@@ -177,7 +177,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
IF( N.EQ.1 ) THEN
IF( NORMALTRANSR ) THEN
@@ -220,7 +220,7 @@
* ARF^C has lda rows and n+1-noe cols
*
IF( .NOT.NORMALTRANSR )
- + LDA = ( N+1 ) / 2
+ $ LDA = ( N+1 ) / 2
*
* start execution: there are eight cases
*
diff --git a/SRC/dtrttf.f b/SRC/dtrttf.f
index 2239dc27..e0ca3b4f 100644
--- a/SRC/dtrttf.f
+++ b/SRC/dtrttf.f
@@ -211,11 +211,11 @@
K = N / 2
NISODD = .FALSE.
IF( .NOT.LOWER )
- + NP1X2 = N + N + 2
+ $ NP1X2 = N + N + 2
ELSE
NISODD = .TRUE.
IF( .NOT.LOWER )
- + NX2 = N + N
+ $ NX2 = N + N
END IF
*
IF( NISODD ) THEN
diff --git a/SRC/ieeeck.f b/SRC/ieeeck.f
index f4cfcff9..befe08c5 100644
--- a/SRC/ieeeck.f
+++ b/SRC/ieeeck.f
@@ -39,6 +39,8 @@
* = 0: Arithmetic failed to produce the correct answers
* = 1: Arithmetic produced the correct answers
*
+* =====================================================================
+*
* .. Local Scalars ..
REAL NAN1, NAN2, NAN3, NAN4, NAN5, NAN6, NEGINF,
$ NEGZRO, NEWZRO, POSINF
diff --git a/SRC/sgejsv.f b/SRC/sgejsv.f
index aaef93f4..7873bbbd 100644
--- a/SRC/sgejsv.f
+++ b/SRC/sgejsv.f
@@ -1,6 +1,6 @@
SUBROUTINE SGEJSV( JOBA, JOBU, JOBV, JOBR, JOBT, JOBP,
- & M, N, A, LDA, SVA, U, LDU, V, LDV,
- & WORK, LWORK, IWORK, INFO )
+ $ M, N, A, LDA, SVA, U, LDU, V, LDV,
+ $ WORK, LWORK, IWORK, INFO )
*
* -- LAPACK routine (version 3.3.1) --
*
@@ -17,14 +17,12 @@
* eigenvalue problems Hx = lambda M x, H M x = lambda x with H, M > 0.
*
* .. Scalar Arguments ..
-*
IMPLICIT NONE
INTEGER INFO, LDA, LDU, LDV, LWORK, M, N
-*
+* ..
* .. Array Arguments ..
-*
REAL A( LDA, * ), SVA( N ), U( LDU, * ), V( LDV, * ),
- & WORK( LWORK )
+ $ WORK( LWORK )
INTEGER IWORK( * )
CHARACTER*1 JOBA, JOBP, JOBR, JOBT, JOBU, JOBV
* ..
@@ -391,16 +389,16 @@
* ..
* .. Local Scalars ..
REAL AAPP, AAQQ, AATMAX, AATMIN, BIG, BIG1, COND_OK,
- & CONDR1, CONDR2, ENTRA, ENTRAT, EPSLN, MAXPRJ, SCALEM,
- & SCONDA, SFMIN, SMALL, TEMP1, USCAL1, USCAL2, XSC
+ $ CONDR1, CONDR2, ENTRA, ENTRAT, EPSLN, MAXPRJ, SCALEM,
+ $ SCONDA, SFMIN, SMALL, TEMP1, USCAL1, USCAL2, XSC
INTEGER IERR, N1, NR, NUMRANK, p, q, WARNING
LOGICAL ALMORT, DEFR, ERREST, GOSCAL, JRACC, KILL, LSVEC,
- & L2ABER, L2KILL, L2PERT, L2RANK, L2TRAN,
- & NOSCAL, ROWPIV, RSVEC, TRANSP
+ $ L2ABER, L2KILL, L2PERT, L2RANK, L2TRAN,
+ $ NOSCAL, ROWPIV, RSVEC, TRANSP
* ..
* .. Intrinsic Functions ..
INTRINSIC ABS, ALOG, AMAX1, AMIN1, FLOAT,
- & MAX0, MIN0, NINT, SIGN, SQRT
+ $ MAX0, MIN0, NINT, SIGN, SQRT
* ..
* .. External Functions ..
REAL SLAMCH, SNRM2
@@ -410,8 +408,8 @@
* ..
* .. External Subroutines ..
EXTERNAL SCOPY, SGELQF, SGEQP3, SGEQRF, SLACPY, SLASCL,
- & SLASET, SLASSQ, SLASWP, SORGQR, SORMLQ,
- & SORMQR, SPOCON, SSCAL, SSWAP, STRSM, XERBLA
+ $ SLASET, SLASSQ, SLASWP, SORGQR, SORMLQ,
+ $ SORMQR, SPOCON, SSCAL, SSWAP, STRSM, XERBLA
*
EXTERNAL SGESVJ
* ..
@@ -431,13 +429,13 @@
L2PERT = LSAME( JOBP, 'P' )
*
IF ( .NOT.(ROWPIV .OR. L2RANK .OR. L2ABER .OR.
- & ERREST .OR. LSAME( JOBA, 'C' ) )) THEN
+ $ ERREST .OR. LSAME( JOBA, 'C' ) )) THEN
INFO = - 1
ELSE IF ( .NOT.( LSVEC .OR. LSAME( JOBU, 'N' ) .OR.
- & LSAME( JOBU, 'W' )) ) THEN
+ $ LSAME( JOBU, 'W' )) ) THEN
INFO = - 2
ELSE IF ( .NOT.( RSVEC .OR. LSAME( JOBV, 'N' ) .OR.
- & LSAME( JOBV, 'W' )) .OR. ( JRACC .AND. (.NOT.LSVEC) ) ) THEN
+ $ LSAME( JOBV, 'W' )) .OR. ( JRACC .AND. (.NOT.LSVEC) ) ) THEN
INFO = - 3
ELSE IF ( .NOT. ( L2KILL .OR. DEFR ) ) THEN
INFO = - 4
@@ -456,18 +454,18 @@
ELSE IF ( RSVEC .AND. ( LDV .LT. N ) ) THEN
INFO = - 14
ELSE IF ( (.NOT.(LSVEC .OR. RSVEC .OR. ERREST).AND.
- & (LWORK .LT. MAX0(7,4*N+1,2*M+N))) .OR.
- & (.NOT.(LSVEC .OR. RSVEC) .AND. ERREST .AND.
- & (LWORK .LT. MAX0(7,4*N+N*N,2*M+N))) .OR.
- & (LSVEC .AND. (.NOT.RSVEC) .AND. (LWORK .LT. MAX0(7,2*M+N,4*N+1)))
- & .OR.
- & (RSVEC .AND. (.NOT.LSVEC) .AND. (LWORK .LT. MAX0(7,2*M+N,4*N+1)))
- & .OR.
- & (LSVEC .AND. RSVEC .AND. (.NOT.JRACC) .AND.
- & (LWORK.LT.MAX0(2*M+N,6*N+2*N*N)))
- & .OR. (LSVEC .AND. RSVEC .AND. JRACC .AND.
- & LWORK.LT.MAX0(2*M+N,4*N+N*N,2*N+N*N+6)))
- & THEN
+ $ (LWORK .LT. MAX0(7,4*N+1,2*M+N))) .OR.
+ $ (.NOT.(LSVEC .OR. RSVEC) .AND. ERREST .AND.
+ $ (LWORK .LT. MAX0(7,4*N+N*N,2*M+N))) .OR.
+ $ (LSVEC .AND. (.NOT.RSVEC) .AND. (LWORK .LT. MAX0(7,2*M+N,4*N+1)))
+ $ .OR.
+ $ (RSVEC .AND. (.NOT.LSVEC) .AND. (LWORK .LT. MAX0(7,2*M+N,4*N+1)))
+ $ .OR.
+ $ (LSVEC .AND. RSVEC .AND. (.NOT.JRACC) .AND.
+ $ (LWORK.LT.MAX0(2*M+N,6*N+2*N*N)))
+ $ .OR. (LSVEC .AND. RSVEC .AND. JRACC .AND.
+ $ LWORK.LT.MAX0(2*M+N,4*N+N*N,2*N+N*N+6)))
+ $ THEN
INFO = - 17
ELSE
* #:)
@@ -858,8 +856,8 @@
TEMP1 = SQRT(SFMIN)
DO 3401 p = 2, N
IF ( ( ABS(A(p,p)) .LT. (EPSLN*ABS(A(p-1,p-1))) ) .OR.
- & ( ABS(A(p,p)) .LT. SMALL ) .OR.
- & ( L2KILL .AND. (ABS(A(p,p)) .LT. TEMP1) ) ) GO TO 3402
+ $ ( ABS(A(p,p)) .LT. SMALL ) .OR.
+ $ ( L2KILL .AND. (ABS(A(p,p)) .LT. TEMP1) ) ) GO TO 3402
NR = NR + 1
3401 CONTINUE
3402 CONTINUE
@@ -875,7 +873,7 @@
TEMP1 = SQRT(SFMIN)
DO 3301 p = 2, N
IF ( ( ABS(A(p,p)) .LT. SMALL ) .OR.
- & ( L2KILL .AND. (ABS(A(p,p)) .LT. TEMP1) ) ) GO TO 3302
+ $ ( L2KILL .AND. (ABS(A(p,p)) .LT. TEMP1) ) ) GO TO 3302
NR = NR + 1
3301 CONTINUE
3302 CONTINUE
@@ -907,7 +905,7 @@
CALL SSCAL( p, ONE/TEMP1, V(1,p), 1 )
3053 CONTINUE
CALL SPOCON( 'U', N, V, LDV, ONE, TEMP1,
- & WORK(N+1), IWORK(2*N+M+1), IERR )
+ $ WORK(N+1), IWORK(2*N+M+1), IERR )
ELSE IF ( LSVEC ) THEN
* .. U is available as workspace
CALL SLACPY( 'U', N, N, A, LDA, U, LDU )
@@ -916,7 +914,7 @@
CALL SSCAL( p, ONE/TEMP1, U(1,p), 1 )
3054 CONTINUE
CALL SPOCON( 'U', N, U, LDU, ONE, TEMP1,
- & WORK(N+1), IWORK(2*N+M+1), IERR )
+ $ WORK(N+1), IWORK(2*N+M+1), IERR )
ELSE
CALL SLACPY( 'U', N, N, A, LDA, WORK(N+1), N )
DO 3052 p = 1, N
@@ -925,7 +923,7 @@
3052 CONTINUE
* .. the columns of R are scaled to have unit Euclidean lengths.
CALL SPOCON( 'U', N, WORK(N+1), N, ONE, TEMP1,
- & WORK(N+N*N+1), IWORK(2*N+M+1), IERR )
+ $ WORK(N+N*N+1), IWORK(2*N+M+1), IERR )
END IF
SCONDA = ONE / SQRT(TEMP1)
* SCONDA is an estimate of SQRT(||(R^t * R)^(-1)||_1).
@@ -970,8 +968,8 @@
TEMP1 = XSC*ABS(A(q,q))
DO 4949 p = 1, N
IF ( ( (p.GT.q) .AND. (ABS(A(p,q)).LE.TEMP1) )
- & .OR. ( p .LT. q ) )
- & A(p,q) = SIGN( TEMP1, A(p,q) )
+ $ .OR. ( p .LT. q ) )
+ $ A(p,q) = SIGN( TEMP1, A(p,q) )
4949 CONTINUE
4947 CONTINUE
ELSE
@@ -1000,8 +998,8 @@
TEMP1 = XSC*ABS(A(q,q))
DO 1949 p = 1, NR
IF ( ( (p.GT.q) .AND. (ABS(A(p,q)).LE.TEMP1) )
- & .OR. ( p .LT. q ) )
- & A(p,q) = SIGN( TEMP1, A(p,q) )
+ $ .OR. ( p .LT. q ) )
+ $ A(p,q) = SIGN( TEMP1, A(p,q) )
1949 CONTINUE
1947 CONTINUE
ELSE
@@ -1013,7 +1011,7 @@
* the part which destroys triangular form (confusing?!))
*
CALL SGESVJ( 'L', 'NoU', 'NoV', NR, NR, A, LDA, SVA,
- & N, V, LDV, WORK, LWORK, INFO )
+ $ N, V, LDV, WORK, LWORK, INFO )
*
SCALEM = WORK(1)
NUMRANK = NINT(WORK(2))
@@ -1032,7 +1030,7 @@
CALL SLASET( 'Upper', NR-1, NR-1, ZERO, ZERO, V(1,2), LDV )
*
CALL SGESVJ( 'L','U','N', N, NR, V,LDV, SVA, NR, A,LDA,
- & WORK, LWORK, INFO )
+ $ WORK, LWORK, INFO )
SCALEM = WORK(1)
NUMRANK = NINT(WORK(2))
@@ -1046,14 +1044,14 @@
CALL SLACPY( 'Lower', NR, NR, A, LDA, V, LDV )
CALL SLASET( 'Upper', NR-1, NR-1, ZERO, ZERO, V(1,2), LDV )
CALL SGEQRF( NR, NR, V, LDV, WORK(N+1), WORK(2*N+1),
- & LWORK-2*N, IERR )
+ $ LWORK-2*N, IERR )
DO 8998 p = 1, NR
CALL SCOPY( NR-p+1, V(p,p), LDV, V(p,p), 1 )
8998 CONTINUE
CALL SLASET( 'Upper', NR-1, NR-1, ZERO, ZERO, V(1,2), LDV )
*
CALL SGESVJ( 'Lower', 'U','N', NR, NR, V,LDV, SVA, NR, U,
- & LDU, WORK(N+1), LWORK-N, INFO )
+ $ LDU, WORK(N+1), LWORK-N, INFO )
SCALEM = WORK(N+1)
NUMRANK = NINT(WORK(N+2))
IF ( NR .LT. N ) THEN
@@ -1063,7 +1061,7 @@
END IF
*
CALL SORMLQ( 'Left', 'Transpose', N, N, NR, A, LDA, WORK,
- & V, LDV, WORK(N+1), LWORK-N, IERR )
+ $ V, LDV, WORK(N+1), LWORK-N, IERR )
*
END IF
*
@@ -1088,7 +1086,7 @@
CALL SLASET( 'Upper', NR-1, NR-1, ZERO, ZERO, U(1,2), LDU )
*
CALL SGEQRF( N, NR, U, LDU, WORK(N+1), WORK(2*N+1),
- & LWORK-2*N, IERR )
+ $ LWORK-2*N, IERR )
*
DO 1967 p = 1, NR - 1
CALL SCOPY( NR-p, U(p,p+1), LDU, U(p+1,p), 1 )
@@ -1096,7 +1094,7 @@
CALL SLASET( 'Upper', NR-1, NR-1, ZERO, ZERO, U(1,2), LDU )
*
CALL SGESVJ( 'Lower', 'U', 'N', NR,NR, U, LDU, SVA, NR, A,
- & LDA, WORK(N+1), LWORK-N, INFO )
+ $ LDA, WORK(N+1), LWORK-N, INFO )
SCALEM = WORK(N+1)
NUMRANK = NINT(WORK(N+2))
*
@@ -1109,10 +1107,10 @@
END IF
*
CALL SORMQR( 'Left', 'No Tr', M, N1, N, A, LDA, WORK, U,
- & LDU, WORK(N+1), LWORK-N, IERR )
+ $ LDU, WORK(N+1), LWORK-N, IERR )
*
IF ( ROWPIV )
- & CALL SLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
+ $ CALL SLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
*
DO 1974 p = 1, N1
XSC = ONE / SNRM2( M, U(1,p), 1 )
@@ -1160,9 +1158,9 @@
TEMP1 = XSC*ABS( V(q,q) )
DO 2968 p = 1, N
IF ( ( p .GT. q ) .AND. ( ABS(V(p,q)) .LE. TEMP1 )
- & .OR. ( p .LT. q ) )
- & V(p,q) = SIGN( TEMP1, V(p,q) )
- IF ( p. LT. q ) V(p,q) = - V(p,q)
+ $ .OR. ( p .LT. q ) )
+ $ V(p,q) = SIGN( TEMP1, V(p,q) )
+ IF ( p .LT. q ) V(p,q) = - V(p,q)
2968 CONTINUE
2969 CONTINUE
ELSE
@@ -1179,7 +1177,7 @@
CALL SSCAL(NR-p+1,ONE/TEMP1,WORK(2*N+(p-1)*NR+p),1)
3950 CONTINUE
CALL SPOCON('Lower',NR,WORK(2*N+1),NR,ONE,TEMP1,
- & WORK(2*N+NR*NR+1),IWORK(M+2*N+1),IERR)
+ $ WORK(2*N+NR*NR+1),IWORK(M+2*N+1),IERR)
CONDR1 = ONE / SQRT(TEMP1)
* .. here need a second oppinion on the condition number
* .. then assume worst case scenario
@@ -1195,7 +1193,7 @@
* of a lower triangular matrix.
* R1^t = Q2 * R2
CALL SGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1),
- & LWORK-2*N, IERR )
+ $ LWORK-2*N, IERR )
*
IF ( L2PERT ) THEN
XSC = SQRT(SMALL)/EPSLN
@@ -1203,14 +1201,14 @@
DO 3958 q = 1, p - 1
TEMP1 = XSC * AMIN1(ABS(V(p,p)),ABS(V(q,q)))
IF ( ABS(V(q,p)) .LE. TEMP1 )
- & V(q,p) = SIGN( TEMP1, V(q,p) )
+ $ V(q,p) = SIGN( TEMP1, V(q,p) )
3958 CONTINUE
3959 CONTINUE
END IF
*
IF ( NR .NE. N )
+ $ CALL SLACPY( 'A', N, NR, V, LDV, WORK(2*N+1), N )
* .. save ...
- & CALL SLACPY( 'A', N, NR, V, LDV, WORK(2*N+1), N )
*
* .. this transposed copy should be better than naive
DO 1969 p = 1, NR - 1
@@ -1233,16 +1231,16 @@
IWORK(N+p) = 0
3003 CONTINUE
CALL SGEQP3( N, NR, V, LDV, IWORK(N+1), WORK(N+1),
- & WORK(2*N+1), LWORK-2*N, IERR )
+ $ WORK(2*N+1), LWORK-2*N, IERR )
** CALL SGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1),
-** & LWORK-2*N, IERR )
+** $ LWORK-2*N, IERR )
IF ( L2PERT ) THEN
XSC = SQRT(SMALL)
DO 3969 p = 2, NR
DO 3968 q = 1, p - 1
TEMP1 = XSC * AMIN1(ABS(V(p,p)),ABS(V(q,q)))
IF ( ABS(V(q,p)) .LE. TEMP1 )
- & V(q,p) = SIGN( TEMP1, V(q,p) )
+ $ V(q,p) = SIGN( TEMP1, V(q,p) )
3968 CONTINUE
3969 CONTINUE
END IF
@@ -1262,7 +1260,7 @@
END IF
* Now, compute R2 = L3 * Q3, the LQ factorization.
CALL SGELQF( NR, NR, V, LDV, WORK(2*N+N*NR+1),
- & WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, IERR )
+ $ WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, IERR )
* .. and estimate the condition number
CALL SLACPY( 'L',NR,NR,V,LDV,WORK(2*N+N*NR+NR+1),NR )
DO 4950 p = 1, NR
@@ -1270,7 +1268,7 @@
CALL SSCAL( p, ONE/TEMP1, WORK(2*N+N*NR+NR+p), NR )
4950 CONTINUE
CALL SPOCON( 'L',NR,WORK(2*N+N*NR+NR+1),NR,ONE,TEMP1,
- & WORK(2*N+N*NR+NR+NR*NR+1),IWORK(M+2*N+1),IERR )
+ $ WORK(2*N+N*NR+NR+NR*NR+1),IWORK(M+2*N+1),IERR )
CONDR2 = ONE / SQRT(TEMP1)
*
IF ( CONDR2 .GE. COND_OK ) THEN
@@ -1307,7 +1305,7 @@
IF ( CONDR1 .LT. COND_OK ) THEN
*
CALL SGESVJ( 'L','U','N',NR,NR,V,LDV,SVA,NR,U,
- & LDU,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,INFO )
+ $ LDU,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,INFO )
SCALEM = WORK(2*N+N*NR+NR+1)
NUMRANK = NINT(WORK(2*N+N*NR+NR+2))
DO 3970 p = 1, NR
@@ -1317,7 +1315,7 @@
* .. pick the right matrix equation and solve it
*
- IF ( NR. EQ. N ) THEN
+ IF ( NR .EQ. N ) THEN
* :)) .. best case, R1 is inverted. The solution of this matrix
* equation is Q2*V2 = the product of the Jacobi rotations
* used in SGESVJ, premultiplied with the orthogonal matrix
@@ -1329,14 +1327,14 @@
* used in SGESVJ. The Q-factor from the second QR
* factorization is then built in explicitly.
CALL STRSM('L','U','T','N',NR,NR,ONE,WORK(2*N+1),
- & N,V,LDV)
+ $ N,V,LDV)
IF ( NR .LT. N ) THEN
CALL SLASET('A',N-NR,NR,ZERO,ZERO,V(NR+1,1),LDV)
CALL SLASET('A',NR,N-NR,ZERO,ZERO,V(1,NR+1),LDV)
CALL SLASET('A',N-NR,N-NR,ZERO,ONE,V(NR+1,NR+1),LDV)
END IF
CALL SORMQR('L','N',N,N,NR,WORK(2*N+1),N,WORK(N+1),
- & V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR)
+ $ V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR)
END IF
*
ELSE IF ( CONDR2 .LT. COND_OK ) THEN
@@ -1348,7 +1346,7 @@
* the lower triangular L3 from the LQ factorization of
* R2=L3*Q3), pre-multiplied with the transposed Q3.
CALL SGESVJ( 'L', 'U', 'N', NR, NR, V, LDV, SVA, NR, U,
- & LDU, WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, INFO )
+ $ LDU, WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, INFO )
SCALEM = WORK(2*N+N*NR+NR+1)
NUMRANK = NINT(WORK(2*N+N*NR+NR+2))
DO 3870 p = 1, NR
@@ -1371,7 +1369,7 @@
CALL SLASET( 'A',N-NR,N-NR,ZERO,ONE,V(NR+1,NR+1),LDV )
END IF
CALL SORMQR( 'L','N',N,N,NR,WORK(2*N+1),N,WORK(N+1),
- & V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
+ $ V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
ELSE
* Last line of defense.
* #:( This is a rather pathological case: no scaled condition
@@ -1385,7 +1383,7 @@
* Compute the full SVD of L3 using SGESVJ with explicit
* accumulation of Jacobi rotations.
CALL SGESVJ( 'L', 'U', 'V', NR, NR, V, LDV, SVA, NR, U,
- & LDU, WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, INFO )
+ $ LDU, WORK(2*N+N*NR+NR+1), LWORK-2*N-N*NR-NR, INFO )
SCALEM = WORK(2*N+N*NR+NR+1)
NUMRANK = NINT(WORK(2*N+N*NR+NR+2))
IF ( NR .LT. N ) THEN
@@ -1394,11 +1392,11 @@
CALL SLASET( 'A',N-NR,N-NR,ZERO,ONE,V(NR+1,NR+1),LDV )
END IF
CALL SORMQR( 'L','N',N,N,NR,WORK(2*N+1),N,WORK(N+1),
- & V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
+ $ V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
*
CALL SORMLQ( 'L', 'T', NR, NR, NR, WORK(2*N+1), N,
- & WORK(2*N+N*NR+1), U, LDU, WORK(2*N+N*NR+NR+1),
- & LWORK-2*N-N*NR-NR, IERR )
+ $ WORK(2*N+N*NR+1), U, LDU, WORK(2*N+N*NR+NR+1),
+ $ LWORK-2*N-N*NR-NR, IERR )
DO 773 q = 1, NR
DO 772 p = 1, NR
WORK(2*N+N*NR+NR+IWORK(N+p)) = U(p,q)
@@ -1424,7 +1422,7 @@
973 CONTINUE
XSC = ONE / SNRM2( N, V(1,q), 1 )
IF ( (XSC .LT. (ONE-TEMP1)) .OR. (XSC .GT. (ONE+TEMP1)) )
- & CALL SSCAL( N, XSC, V(1,q), 1 )
+ $ CALL SSCAL( N, XSC, V(1,q), 1 )
1972 CONTINUE
* At this moment, V contains the right singular vectors of A.
* Next, assemble the left singular vector matrix U (M x N).
@@ -1440,21 +1438,21 @@
* matrix U. This applies to all cases.
*
CALL SORMQR( 'Left', 'No_Tr', M, N1, N, A, LDA, WORK, U,
- & LDU, WORK(N+1), LWORK-N, IERR )
+ $ LDU, WORK(N+1), LWORK-N, IERR )
* The columns of U are normalized. The cost is O(M*N) flops.
TEMP1 = SQRT(FLOAT(M)) * EPSLN
DO 1973 p = 1, NR
XSC = ONE / SNRM2( M, U(1,p), 1 )
IF ( (XSC .LT. (ONE-TEMP1)) .OR. (XSC .GT. (ONE+TEMP1)) )
- & CALL SSCAL( M, XSC, U(1,p), 1 )
+ $ CALL SSCAL( M, XSC, U(1,p), 1 )
1973 CONTINUE
*
* If the initial QRF is computed with row pivoting, the left
* singular vectors must be adjusted.
*
IF ( ROWPIV )
- & CALL SLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
+ $ CALL SLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
*
ELSE
*
@@ -1475,7 +1473,7 @@
END IF
*
CALL SGESVJ( 'Upper', 'U', 'N', N, N, WORK(N+1), N, SVA,
- & N, U, LDU, WORK(N+N*N+1), LWORK-N-N*N, INFO )
+ $ N, U, LDU, WORK(N+N*N+1), LWORK-N-N*N, INFO )
*
SCALEM = WORK(N+N*N+1)
NUMRANK = NINT(WORK(N+N*N+2))
@@ -1485,7 +1483,7 @@
6970 CONTINUE
*
CALL STRSM( 'Left', 'Upper', 'NoTrans', 'No UD', N, N,
- & ONE, A, LDA, WORK(N+1), N )
+ $ ONE, A, LDA, WORK(N+1), N )
DO 6972 p = 1, N
CALL SCOPY( N, WORK(N+p), N, V(IWORK(p),1), LDV )
6972 CONTINUE
@@ -1493,7 +1491,7 @@
DO 6971 p = 1, N
XSC = ONE / SNRM2( N, V(1,p), 1 )
IF ( (XSC .LT. (ONE-TEMP1)) .OR. (XSC .GT. (ONE+TEMP1)) )
- & CALL SSCAL( N, XSC, V(1,p), 1 )
+ $ CALL SSCAL( N, XSC, V(1,p), 1 )
6971 CONTINUE
*
* Assemble the left singular vector matrix U (M x N).
@@ -1506,16 +1504,16 @@
END IF
END IF
CALL SORMQR( 'Left', 'No Tr', M, N1, N, A, LDA, WORK, U,
- & LDU, WORK(N+1), LWORK-N, IERR )
+ $ LDU, WORK(N+1), LWORK-N, IERR )
TEMP1 = SQRT(FLOAT(M))*EPSLN
DO 6973 p = 1, N1
XSC = ONE / SNRM2( M, U(1,p), 1 )
IF ( (XSC .LT. (ONE-TEMP1)) .OR. (XSC .GT. (ONE+TEMP1)) )
- & CALL SSCAL( M, XSC, U(1,p), 1 )
+ $ CALL SSCAL( M, XSC, U(1,p), 1 )
6973 CONTINUE
*
IF ( ROWPIV )
- & CALL SLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
+ $ CALL SLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
*
END IF
*
@@ -1543,9 +1541,9 @@
TEMP1 = XSC*ABS( V(q,q) )
DO 5968 p = 1, N
IF ( ( p .GT. q ) .AND. ( ABS(V(p,q)) .LE. TEMP1 )
- & .OR. ( p .LT. q ) )
- & V(p,q) = SIGN( TEMP1, V(p,q) )
- IF ( p. LT. q ) V(p,q) = - V(p,q)
+ $ .OR. ( p .LT. q ) )
+ $ V(p,q) = SIGN( TEMP1, V(p,q) )
+ IF ( p .LT. q ) V(p,q) = - V(p,q)
5968 CONTINUE
5969 CONTINUE
ELSE
@@ -1553,7 +1551,7 @@
END IF
CALL SGEQRF( N, NR, V, LDV, WORK(N+1), WORK(2*N+1),
- & LWORK-2*N, IERR )
+ $ LWORK-2*N, IERR )
CALL SLACPY( 'L', N, NR, V, LDV, WORK(2*N+1), N )
*
DO 7969 p = 1, NR
@@ -1573,7 +1571,7 @@
END IF
CALL SGESVJ( 'L', 'U', 'V', NR, NR, U, LDU, SVA,
- & N, V, LDV, WORK(2*N+N*NR+1), LWORK-2*N-N*NR, INFO )
+ $ N, V, LDV, WORK(2*N+N*NR+1), LWORK-2*N-N*NR, INFO )
SCALEM = WORK(2*N+N*NR+1)
NUMRANK = NINT(WORK(2*N+N*NR+2))
@@ -1584,7 +1582,7 @@
END IF
CALL SORMQR( 'L','N',N,N,NR,WORK(2*N+1),N,WORK(N+1),
- & V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
+ $ V,LDV,WORK(2*N+N*NR+NR+1),LWORK-2*N-N*NR-NR,IERR )
*
* Permute the rows of V using the (column) permutation from the
* first QRF. Also, scale the columns to make them unit in
@@ -1600,7 +1598,7 @@
8973 CONTINUE
XSC = ONE / SNRM2( N, V(1,q), 1 )
IF ( (XSC .LT. (ONE-TEMP1)) .OR. (XSC .GT. (ONE+TEMP1)) )
- & CALL SSCAL( N, XSC, V(1,q), 1 )
+ $ CALL SSCAL( N, XSC, V(1,q), 1 )
7972 CONTINUE
*
* At this moment, V contains the right singular vectors of A.
@@ -1615,10 +1613,10 @@
END IF
*
CALL SORMQR( 'Left', 'No Tr', M, N1, N, A, LDA, WORK, U,
- & LDU, WORK(N+1), LWORK-N, IERR )
+ $ LDU, WORK(N+1), LWORK-N, IERR )
*
IF ( ROWPIV )
- & CALL SLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
+ $ CALL SLASWP( N1, U, LDU, 1, M-1, IWORK(2*N+1), -1 )
*
*
END IF
diff --git a/SRC/sgesvj.f b/SRC/sgesvj.f
index f5fa3978..32798519 100644
--- a/SRC/sgesvj.f
+++ b/SRC/sgesvj.f
@@ -1,5 +1,5 @@
SUBROUTINE SGESVJ( JOBA, JOBU, JOBV, M, N, A, LDA, SVA, MV, V,
- + LDV, WORK, LWORK, INFO )
+ $ LDV, WORK, LWORK, INFO )
*
* -- LAPACK routine (version 3.3.1) --
*
@@ -23,7 +23,7 @@
* ..
* .. Array Arguments ..
REAL A( LDA, * ), SVA( N ), V( LDV, * ),
- + WORK( LWORK )
+ $ WORK( LWORK )
* ..
*
* Purpose
@@ -256,22 +256,22 @@
* .. Local Parameters ..
REAL ZERO, HALF, ONE, TWO
PARAMETER ( ZERO = 0.0E0, HALF = 0.5E0, ONE = 1.0E0,
- + TWO = 2.0E0 )
+ $ TWO = 2.0E0 )
INTEGER NSWEEP
PARAMETER ( NSWEEP = 30 )
* ..
* .. Local Scalars ..
REAL AAPP, AAPP0, AAPQ, AAQQ, APOAQ, AQOAP, BIG,
- + BIGTHETA, CS, CTOL, EPSLN, LARGE, MXAAPQ,
- + MXSINJ, ROOTBIG, ROOTEPS, ROOTSFMIN, ROOTTOL,
- + SKL, SFMIN, SMALL, SN, T, TEMP1, THETA,
- + THSIGN, TOL
+ $ BIGTHETA, CS, CTOL, EPSLN, LARGE, MXAAPQ,
+ $ MXSINJ, ROOTBIG, ROOTEPS, ROOTSFMIN, ROOTTOL,
+ $ SKL, SFMIN, SMALL, SN, T, TEMP1, THETA,
+ $ THSIGN, TOL
INTEGER BLSKIP, EMPTSW, i, ibr, IERR, igl, IJBLSK, ir1,
- + ISWROT, jbc, jgl, KBL, LKAHEAD, MVL, N2, N34,
- + N4, NBL, NOTROT, p, PSKIPPED, q, ROWSKIP,
- + SWBAND
+ $ ISWROT, jbc, jgl, KBL, LKAHEAD, MVL, N2, N34,
+ $ N4, NBL, NOTROT, p, PSKIPPED, q, ROWSKIP,
+ $ SWBAND
LOGICAL APPLV, GOSCALE, LOWER, LSVEC, NOSCALE, ROTOK,
- + RSVEC, UCTOL, UPPER
+ $ RSVEC, UCTOL, UPPER
* ..
* .. Local Arrays ..
REAL FASTR( 5 )
@@ -327,7 +327,7 @@
ELSE IF( MV.LT.0 ) THEN
INFO = -9
ELSE IF( ( RSVEC .AND. ( LDV.LT.N ) ) .OR.
- + ( APPLV .AND. ( LDV.LT.MV ) ) ) THEN
+ $ ( APPLV .AND. ( LDV.LT.MV ) ) ) THEN
INFO = -11
ELSE IF( UCTOL .AND. ( WORK( 1 ).LE.ONE ) ) THEN
INFO = -12
@@ -518,7 +518,7 @@
*
IF( N.EQ.1 ) THEN
IF( LSVEC )CALL SLASCL( 'G', 0, 0, SVA( 1 ), SKL, M, 1,
- + A( 1, 1 ), LDA, IERR )
+ $ A( 1, 1 ), LDA, IERR )
WORK( 1 ) = ONE / SKL
IF( SVA( 1 ).GE.SFMIN ) THEN
WORK( 2 ) = ONE
@@ -538,7 +538,7 @@
SN = SQRT( SFMIN / EPSLN )
TEMP1 = SQRT( BIG / FLOAT( N ) )
IF( ( AAPP.LE.SN ) .OR. ( AAQQ.GE.TEMP1 ) .OR.
- + ( ( SN.LE.AAQQ ) .AND. ( AAPP.LE.TEMP1 ) ) ) THEN
+ $ ( ( SN.LE.AAQQ ) .AND. ( AAPP.LE.TEMP1 ) ) ) THEN
TEMP1 = AMIN1( BIG, TEMP1 / AAPP )
* AAQQ = AAQQ*TEMP1
* AAPP = AAPP*TEMP1
@@ -638,54 +638,54 @@
* [+ + x x] [x x]. [x x]
*
CALL SGSVJ0( JOBV, M-N34, N-N34, A( N34+1, N34+1 ), LDA,
- + WORK( N34+1 ), SVA( N34+1 ), MVL,
- + V( N34*q+1, N34+1 ), LDV, EPSLN, SFMIN, TOL,
- + 2, WORK( N+1 ), LWORK-N, IERR )
+ $ WORK( N34+1 ), SVA( N34+1 ), MVL,
+ $ V( N34*q+1, N34+1 ), LDV, EPSLN, SFMIN, TOL,
+ $ 2, WORK( N+1 ), LWORK-N, IERR )
*
CALL SGSVJ0( JOBV, M-N2, N34-N2, A( N2+1, N2+1 ), LDA,
- + WORK( N2+1 ), SVA( N2+1 ), MVL,
- + V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 2,
- + WORK( N+1 ), LWORK-N, IERR )
+ $ WORK( N2+1 ), SVA( N2+1 ), MVL,
+ $ V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 2,
+ $ WORK( N+1 ), LWORK-N, IERR )
*
CALL SGSVJ1( JOBV, M-N2, N-N2, N4, A( N2+1, N2+1 ), LDA,
- + WORK( N2+1 ), SVA( N2+1 ), MVL,
- + V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 1,
- + WORK( N+1 ), LWORK-N, IERR )
+ $ WORK( N2+1 ), SVA( N2+1 ), MVL,
+ $ V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 1,
+ $ WORK( N+1 ), LWORK-N, IERR )
*
CALL SGSVJ0( JOBV, M-N4, N2-N4, A( N4+1, N4+1 ), LDA,
- + WORK( N4+1 ), SVA( N4+1 ), MVL,
- + V( N4*q+1, N4+1 ), LDV, EPSLN, SFMIN, TOL, 1,
- + WORK( N+1 ), LWORK-N, IERR )
+ $ WORK( N4+1 ), SVA( N4+1 ), MVL,
+ $ V( N4*q+1, N4+1 ), LDV, EPSLN, SFMIN, TOL, 1,
+ $ WORK( N+1 ), LWORK-N, IERR )
*
CALL SGSVJ0( JOBV, M, N4, A, LDA, WORK, SVA, MVL, V, LDV,
- + EPSLN, SFMIN, TOL, 1, WORK( N+1 ), LWORK-N,
- + IERR )
+ $ EPSLN, SFMIN, TOL, 1, WORK( N+1 ), LWORK-N,
+ $ IERR )
*
CALL SGSVJ1( JOBV, M, N2, N4, A, LDA, WORK, SVA, MVL, V,
- + LDV, EPSLN, SFMIN, TOL, 1, WORK( N+1 ),
- + LWORK-N, IERR )
+ $ LDV, EPSLN, SFMIN, TOL, 1, WORK( N+1 ),
+ $ LWORK-N, IERR )
*
*
ELSE IF( UPPER ) THEN
*
*
CALL SGSVJ0( JOBV, N4, N4, A, LDA, WORK, SVA, MVL, V, LDV,
- + EPSLN, SFMIN, TOL, 2, WORK( N+1 ), LWORK-N,
- + IERR )
+ $ EPSLN, SFMIN, TOL, 2, WORK( N+1 ), LWORK-N,
+ $ IERR )
*
CALL SGSVJ0( JOBV, N2, N4, A( 1, N4+1 ), LDA, WORK( N4+1 ),
- + SVA( N4+1 ), MVL, V( N4*q+1, N4+1 ), LDV,
- + EPSLN, SFMIN, TOL, 1, WORK( N+1 ), LWORK-N,
- + IERR )
+ $ SVA( N4+1 ), MVL, V( N4*q+1, N4+1 ), LDV,
+ $ EPSLN, SFMIN, TOL, 1, WORK( N+1 ), LWORK-N,
+ $ IERR )
*
CALL SGSVJ1( JOBV, N2, N2, N4, A, LDA, WORK, SVA, MVL, V,
- + LDV, EPSLN, SFMIN, TOL, 1, WORK( N+1 ),
- + LWORK-N, IERR )
+ $ LDV, EPSLN, SFMIN, TOL, 1, WORK( N+1 ),
+ $ LWORK-N, IERR )
*
CALL SGSVJ0( JOBV, N2+N4, N4, A( 1, N2+1 ), LDA,
- + WORK( N2+1 ), SVA( N2+1 ), MVL,
- + V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 1,
- + WORK( N+1 ), LWORK-N, IERR )
+ $ WORK( N2+1 ), SVA( N2+1 ), MVL,
+ $ V( N2*q+1, N2+1 ), LDV, EPSLN, SFMIN, TOL, 1,
+ $ WORK( N+1 ), LWORK-N, IERR )
END IF
*
@@ -725,7 +725,7 @@
IF( p.NE.q ) THEN
CALL SSWAP( M, A( 1, p ), 1, A( 1, q ), 1 )
IF( RSVEC )CALL SSWAP( MVL, V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, q ), 1 )
TEMP1 = SVA( p )
SVA( p ) = SVA( q )
SVA( q ) = TEMP1
@@ -749,7 +749,7 @@
* below should read "AAPP = SNRM2( M, A(1,p), 1 ) * WORK(p)".
*
IF( ( SVA( p ).LT.ROOTBIG ) .AND.
- + ( SVA( p ).GT.ROOTSFMIN ) ) THEN
+ $ ( SVA( p ).GT.ROOTSFMIN ) ) THEN
SVA( p ) = SNRM2( M, A( 1, p ), 1 )*WORK( p )
ELSE
TEMP1 = ZERO
@@ -777,31 +777,31 @@
ROTOK = ( SMALL*AAPP ).LE.AAQQ
IF( AAPP.LT.( BIG / AAQQ ) ) THEN
AAPQ = ( SDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*WORK( p )*WORK( q ) /
- + AAQQ ) / AAPP
+ $ q ), 1 )*WORK( p )*WORK( q ) /
+ $ AAQQ ) / AAPP
ELSE
CALL SCOPY( M, A( 1, p ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL SLASCL( 'G', 0, 0, AAPP,
- + WORK( p ), M, 1,
- + WORK( N+1 ), LDA, IERR )
+ $ WORK( p ), M, 1,
+ $ WORK( N+1 ), LDA, IERR )
AAPQ = SDOT( M, WORK( N+1 ), 1,
- + A( 1, q ), 1 )*WORK( q ) / AAQQ
+ $ A( 1, q ), 1 )*WORK( q ) / AAQQ
END IF
ELSE
ROTOK = AAPP.LE.( AAQQ / SMALL )
IF( AAPP.GT.( SMALL / AAQQ ) ) THEN
AAPQ = ( SDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*WORK( p )*WORK( q ) /
- + AAQQ ) / AAPP
+ $ q ), 1 )*WORK( p )*WORK( q ) /
+ $ AAQQ ) / AAPP
ELSE
CALL SCOPY( M, A( 1, q ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL SLASCL( 'G', 0, 0, AAQQ,
- + WORK( q ), M, 1,
- + WORK( N+1 ), LDA, IERR )
+ $ WORK( q ), M, 1,
+ $ WORK( N+1 ), LDA, IERR )
AAPQ = SDOT( M, WORK( N+1 ), 1,
- + A( 1, p ), 1 )*WORK( p ) / AAPP
+ $ A( 1, p ), 1 )*WORK( p ) / AAPP
END IF
END IF
*
@@ -831,17 +831,17 @@
T = HALF / THETA
FASTR( 3 ) = T*WORK( p ) / WORK( q )
FASTR( 4 ) = -T*WORK( q ) /
- + WORK( p )
+ $ WORK( p )
CALL SROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1, FASTR )
+ $ A( 1, q ), 1, FASTR )
IF( RSVEC )CALL SROTM( MVL,
- + V( 1, p ), 1,
- + V( 1, q ), 1,
- + FASTR )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1,
+ $ FASTR )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*SQRT( AMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, ABS( T ) )
*
ELSE
@@ -850,15 +850,15 @@
*
THSIGN = -SIGN( ONE, AAPQ )
T = ONE / ( THETA+THSIGN*
- + SQRT( ONE+THETA*THETA ) )
+ $ SQRT( ONE+THETA*THETA ) )
CS = SQRT( ONE / ( ONE+T*T ) )
SN = T*CS
*
MXSINJ = AMAX1( MXSINJ, ABS( SN ) )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*SQRT( AMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
*
APOAQ = WORK( p ) / WORK( q )
AQOAP = WORK( q ) / WORK( p )
@@ -869,88 +869,88 @@
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q )*CS
CALL SROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1,
- + FASTR )
+ $ A( 1, q ), 1,
+ $ FASTR )
IF( RSVEC )CALL SROTM( MVL,
- + V( 1, p ), 1, V( 1, q ),
- + 1, FASTR )
+ $ V( 1, p ), 1, V( 1, q ),
+ $ 1, FASTR )
ELSE
CALL SAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL SAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q ) / CS
IF( RSVEC ) THEN
CALL SAXPY( MVL, -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL SAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
END IF
ELSE
IF( WORK( q ).GE.ONE ) THEN
CALL SAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL SAXPY( M, -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
WORK( p ) = WORK( p ) / CS
WORK( q ) = WORK( q )*CS
IF( RSVEC ) THEN
CALL SAXPY( MVL, T*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
CALL SAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
ELSE
IF( WORK( p ).GE.WORK( q ) )
- + THEN
+ $ THEN
CALL SAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL SAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q ) / CS
IF( RSVEC ) THEN
CALL SAXPY( MVL,
- + -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -T*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL SAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
ELSE
CALL SAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL SAXPY( M,
- + -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
WORK( p ) = WORK( p ) / CS
WORK( q ) = WORK( q )*CS
IF( RSVEC ) THEN
CALL SAXPY( MVL,
- + T*APOAQ, V( 1, p ),
- + 1, V( 1, q ), 1 )
+ $ T*APOAQ, V( 1, p ),
+ $ 1, V( 1, q ), 1 )
CALL SAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
END IF
END IF
@@ -960,19 +960,19 @@
ELSE
* .. have to use modified Gram-Schmidt like transformation
CALL SCOPY( M, A( 1, p ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL SLASCL( 'G', 0, 0, AAPP, ONE, M,
- + 1, WORK( N+1 ), LDA,
- + IERR )
+ $ 1, WORK( N+1 ), LDA,
+ $ IERR )
CALL SLASCL( 'G', 0, 0, AAQQ, ONE, M,
- + 1, A( 1, q ), LDA, IERR )
+ $ 1, A( 1, q ), LDA, IERR )
TEMP1 = -AAPQ*WORK( p ) / WORK( q )
CALL SAXPY( M, TEMP1, WORK( N+1 ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, q ), 1 )
CALL SLASCL( 'G', 0, 0, ONE, AAQQ, M,
- + 1, A( 1, q ), LDA, IERR )
+ $ 1, A( 1, q ), LDA, IERR )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, SFMIN )
END IF
* END IF ROTOK THEN ... ELSE
@@ -981,29 +981,29 @@
* recompute SVA(q), SVA(p).
*
IF( ( SVA( q ) / AAQQ )**2.LE.ROOTEPS )
- + THEN
+ $ THEN
IF( ( AAQQ.LT.ROOTBIG ) .AND.
- + ( AAQQ.GT.ROOTSFMIN ) ) THEN
+ $ ( AAQQ.GT.ROOTSFMIN ) ) THEN
SVA( q ) = SNRM2( M, A( 1, q ), 1 )*
- + WORK( q )
+ $ WORK( q )
ELSE
T = ZERO
AAQQ = ONE
CALL SLASSQ( M, A( 1, q ), 1, T,
- + AAQQ )
+ $ AAQQ )
SVA( q ) = T*SQRT( AAQQ )*WORK( q )
END IF
END IF
IF( ( AAPP / AAPP0 ).LE.ROOTEPS ) THEN
IF( ( AAPP.LT.ROOTBIG ) .AND.
- + ( AAPP.GT.ROOTSFMIN ) ) THEN
+ $ ( AAPP.GT.ROOTSFMIN ) ) THEN
AAPP = SNRM2( M, A( 1, p ), 1 )*
- + WORK( p )
+ $ WORK( p )
ELSE
T = ZERO
AAPP = ONE
CALL SLASSQ( M, A( 1, p ), 1, T,
- + AAPP )
+ $ AAPP )
AAPP = T*SQRT( AAPP )*WORK( p )
END IF
SVA( p ) = AAPP
@@ -1022,7 +1022,7 @@
END IF
*
IF( ( i.LE.SWBAND ) .AND.
- + ( PSKIPPED.GT.ROWSKIP ) ) THEN
+ $ ( PSKIPPED.GT.ROWSKIP ) ) THEN
IF( ir1.EQ.0 )AAPP = -AAPP
NOTROT = 0
GO TO 2103
@@ -1039,7 +1039,7 @@
ELSE
SVA( p ) = AAPP
IF( ( ir1.EQ.0 ) .AND. ( AAPP.EQ.ZERO ) )
- + NOTROT = NOTROT + MIN0( igl+KBL-1, N ) - p
+ $ NOTROT = NOTROT + MIN0( igl+KBL-1, N ) - p
END IF
*
2001 CONTINUE
@@ -1084,16 +1084,16 @@
END IF
IF( AAPP.LT.( BIG / AAQQ ) ) THEN
AAPQ = ( SDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*WORK( p )*WORK( q ) /
- + AAQQ ) / AAPP
+ $ q ), 1 )*WORK( p )*WORK( q ) /
+ $ AAQQ ) / AAPP
ELSE
CALL SCOPY( M, A( 1, p ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL SLASCL( 'G', 0, 0, AAPP,
- + WORK( p ), M, 1,
- + WORK( N+1 ), LDA, IERR )
+ $ WORK( p ), M, 1,
+ $ WORK( N+1 ), LDA, IERR )
AAPQ = SDOT( M, WORK( N+1 ), 1,
- + A( 1, q ), 1 )*WORK( q ) / AAQQ
+ $ A( 1, q ), 1 )*WORK( q ) / AAQQ
END IF
ELSE
IF( AAPP.GE.AAQQ ) THEN
@@ -1103,16 +1103,16 @@
END IF
IF( AAPP.GT.( SMALL / AAQQ ) ) THEN
AAPQ = ( SDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*WORK( p )*WORK( q ) /
- + AAQQ ) / AAPP
+ $ q ), 1 )*WORK( p )*WORK( q ) /
+ $ AAQQ ) / AAPP
ELSE
CALL SCOPY( M, A( 1, q ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL SLASCL( 'G', 0, 0, AAQQ,
- + WORK( q ), M, 1,
- + WORK( N+1 ), LDA, IERR )
+ $ WORK( q ), M, 1,
+ $ WORK( N+1 ), LDA, IERR )
AAPQ = SDOT( M, WORK( N+1 ), 1,
- + A( 1, p ), 1 )*WORK( p ) / AAPP
+ $ A( 1, p ), 1 )*WORK( p ) / AAPP
END IF
END IF
*
@@ -1137,17 +1137,17 @@
T = HALF / THETA
FASTR( 3 ) = T*WORK( p ) / WORK( q )
FASTR( 4 ) = -T*WORK( q ) /
- + WORK( p )
+ $ WORK( p )
CALL SROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1, FASTR )
+ $ A( 1, q ), 1, FASTR )
IF( RSVEC )CALL SROTM( MVL,
- + V( 1, p ), 1,
- + V( 1, q ), 1,
- + FASTR )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1,
+ $ FASTR )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*SQRT( AMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, ABS( T ) )
ELSE
*
@@ -1156,14 +1156,14 @@
THSIGN = -SIGN( ONE, AAPQ )
IF( AAQQ.GT.AAPP0 )THSIGN = -THSIGN
T = ONE / ( THETA+THSIGN*
- + SQRT( ONE+THETA*THETA ) )
+ $ SQRT( ONE+THETA*THETA ) )
CS = SQRT( ONE / ( ONE+T*T ) )
SN = T*CS
MXSINJ = AMAX1( MXSINJ, ABS( SN ) )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*SQRT( AMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
*
APOAQ = WORK( p ) / WORK( q )
AQOAP = WORK( q ) / WORK( p )
@@ -1175,26 +1175,26 @@
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q )*CS
CALL SROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1,
- + FASTR )
+ $ A( 1, q ), 1,
+ $ FASTR )
IF( RSVEC )CALL SROTM( MVL,
- + V( 1, p ), 1, V( 1, q ),
- + 1, FASTR )
+ $ V( 1, p ), 1, V( 1, q ),
+ $ 1, FASTR )
ELSE
CALL SAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL SAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
IF( RSVEC ) THEN
CALL SAXPY( MVL, -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL SAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q ) / CS
@@ -1202,61 +1202,61 @@
ELSE
IF( WORK( q ).GE.ONE ) THEN
CALL SAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL SAXPY( M, -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
IF( RSVEC ) THEN
CALL SAXPY( MVL, T*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
CALL SAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
WORK( p ) = WORK( p ) / CS
WORK( q ) = WORK( q )*CS
ELSE
IF( WORK( p ).GE.WORK( q ) )
- + THEN
+ $ THEN
CALL SAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL SAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
WORK( p ) = WORK( p )*CS
WORK( q ) = WORK( q ) / CS
IF( RSVEC ) THEN
CALL SAXPY( MVL,
- + -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -T*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL SAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
ELSE
CALL SAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL SAXPY( M,
- + -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
WORK( p ) = WORK( p ) / CS
WORK( q ) = WORK( q )*CS
IF( RSVEC ) THEN
CALL SAXPY( MVL,
- + T*APOAQ, V( 1, p ),
- + 1, V( 1, q ), 1 )
+ $ T*APOAQ, V( 1, p ),
+ $ 1, V( 1, q ), 1 )
CALL SAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
END IF
END IF
@@ -1266,39 +1266,39 @@
ELSE
IF( AAPP.GT.AAQQ ) THEN
CALL SCOPY( M, A( 1, p ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL SLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, WORK( N+1 ), LDA,
- + IERR )
+ $ M, 1, WORK( N+1 ), LDA,
+ $ IERR )
CALL SLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*WORK( p ) / WORK( q )
CALL SAXPY( M, TEMP1, WORK( N+1 ),
- + 1, A( 1, q ), 1 )
+ $ 1, A( 1, q ), 1 )
CALL SLASCL( 'G', 0, 0, ONE, AAQQ,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, SFMIN )
ELSE
CALL SCOPY( M, A( 1, q ), 1,
- + WORK( N+1 ), 1 )
+ $ WORK( N+1 ), 1 )
CALL SLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, WORK( N+1 ), LDA,
- + IERR )
+ $ M, 1, WORK( N+1 ), LDA,
+ $ IERR )
CALL SLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*WORK( q ) / WORK( p )
CALL SAXPY( M, TEMP1, WORK( N+1 ),
- + 1, A( 1, p ), 1 )
+ $ 1, A( 1, p ), 1 )
CALL SLASCL( 'G', 0, 0, ONE, AAPP,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
SVA( p ) = AAPP*SQRT( AMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, SFMIN )
END IF
END IF
@@ -1307,29 +1307,29 @@
* In the case of cancellation in updating SVA(q)
* .. recompute SVA(q)
IF( ( SVA( q ) / AAQQ )**2.LE.ROOTEPS )
- + THEN
+ $ THEN
IF( ( AAQQ.LT.ROOTBIG ) .AND.
- + ( AAQQ.GT.ROOTSFMIN ) ) THEN
+ $ ( AAQQ.GT.ROOTSFMIN ) ) THEN
SVA( q ) = SNRM2( M, A( 1, q ), 1 )*
- + WORK( q )
+ $ WORK( q )
ELSE
T = ZERO
AAQQ = ONE
CALL SLASSQ( M, A( 1, q ), 1, T,
- + AAQQ )
+ $ AAQQ )
SVA( q ) = T*SQRT( AAQQ )*WORK( q )
END IF
END IF
IF( ( AAPP / AAPP0 )**2.LE.ROOTEPS ) THEN
IF( ( AAPP.LT.ROOTBIG ) .AND.
- + ( AAPP.GT.ROOTSFMIN ) ) THEN
+ $ ( AAPP.GT.ROOTSFMIN ) ) THEN
AAPP = SNRM2( M, A( 1, p ), 1 )*
- + WORK( p )
+ $ WORK( p )
ELSE
T = ZERO
AAPP = ONE
CALL SLASSQ( M, A( 1, p ), 1, T,
- + AAPP )
+ $ AAPP )
AAPP = T*SQRT( AAPP )*WORK( p )
END IF
SVA( p ) = AAPP
@@ -1348,13 +1348,13 @@
END IF
*
IF( ( i.LE.SWBAND ) .AND. ( IJBLSK.GE.BLSKIP ) )
- + THEN
+ $ THEN
SVA( p ) = AAPP
NOTROT = 0
GO TO 2011
END IF
IF( ( i.LE.SWBAND ) .AND.
- + ( PSKIPPED.GT.ROWSKIP ) ) THEN
+ $ ( PSKIPPED.GT.ROWSKIP ) ) THEN
AAPP = -AAPP
NOTROT = 0
GO TO 2203
@@ -1369,7 +1369,7 @@
ELSE
*
IF( AAPP.EQ.ZERO )NOTROT = NOTROT +
- + MIN0( jgl+KBL-1, N ) - jgl + 1
+ $ MIN0( jgl+KBL-1, N ) - jgl + 1
IF( AAPP.LT.ZERO )NOTROT = 0
*
END IF
@@ -1389,7 +1389,7 @@
*
* .. update SVA(N)
IF( ( SVA( N ).LT.ROOTBIG ) .AND. ( SVA( N ).GT.ROOTSFMIN ) )
- + THEN
+ $ THEN
SVA( N ) = SNRM2( M, A( 1, N ), 1 )*WORK( N )
ELSE
T = ZERO
@@ -1401,10 +1401,10 @@
* Additional steering devices
*
IF( ( i.LT.SWBAND ) .AND. ( ( MXAAPQ.LE.ROOTTOL ) .OR.
- + ( ISWROT.LE.N ) ) )SWBAND = i
+ $ ( ISWROT.LE.N ) ) )SWBAND = i
*
IF( ( i.GT.SWBAND+1 ) .AND. ( MXAAPQ.LT.SQRT( FLOAT( N ) )*
- + TOL ) .AND. ( FLOAT( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
+ $ TOL ) .AND. ( FLOAT( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
GO TO 1994
END IF
*
@@ -1477,8 +1477,8 @@
*
* Undo scaling, if necessary (and possible).
IF( ( ( SKL.GT.ONE ) .AND. ( SVA( 1 ).LT.( BIG /
- + SKL ) ) ) .OR. ( ( SKL.LT.ONE ) .AND. ( SVA( N2 ).GT.
- + ( SFMIN / SKL ) ) ) ) THEN
+ $ SKL ) ) ) .OR. ( ( SKL.LT.ONE ) .AND. ( SVA( N2 ).GT.
+ $ ( SFMIN / SKL ) ) ) ) THEN
DO 2400 p = 1, N
SVA( p ) = SKL*SVA( p )
2400 CONTINUE
diff --git a/SRC/sgsvj0.f b/SRC/sgsvj0.f
index 07c4d030..3e0ed57e 100644
--- a/SRC/sgsvj0.f
+++ b/SRC/sgsvj0.f
@@ -1,5 +1,5 @@
SUBROUTINE SGSVJ0( JOBV, M, N, A, LDA, D, SVA, MV, V, LDV, EPS,
- + SFMIN, TOL, NSWEEP, WORK, LWORK, INFO )
+ $ SFMIN, TOL, NSWEEP, WORK, LWORK, INFO )
*
* -- LAPACK routine (version 3.3.1) --
*
@@ -24,7 +24,7 @@
* ..
* .. Array Arguments ..
REAL A( LDA, * ), SVA( N ), D( N ), V( LDV, * ),
- + WORK( LWORK )
+ $ WORK( LWORK )
* ..
*
* Purpose
@@ -145,16 +145,16 @@
* .. Local Parameters ..
REAL ZERO, HALF, ONE, TWO
PARAMETER ( ZERO = 0.0E0, HALF = 0.5E0, ONE = 1.0E0,
- + TWO = 2.0E0 )
+ $ TWO = 2.0E0 )
* ..
* .. Local Scalars ..
REAL AAPP, AAPP0, AAPQ, AAQQ, APOAQ, AQOAP, BIG,
- + BIGTHETA, CS, MXAAPQ, MXSINJ, ROOTBIG, ROOTEPS,
- + ROOTSFMIN, ROOTTOL, SMALL, SN, T, TEMP1, THETA,
- + THSIGN
+ $ BIGTHETA, CS, MXAAPQ, MXSINJ, ROOTBIG, ROOTEPS,
+ $ ROOTSFMIN, ROOTTOL, SMALL, SN, T, TEMP1, THETA,
+ $ THSIGN
INTEGER BLSKIP, EMPTSW, i, ibr, IERR, igl, IJBLSK, ir1,
- + ISWROT, jbc, jgl, KBL, LKAHEAD, MVL, NBL,
- + NOTROT, p, PSKIPPED, q, ROWSKIP, SWBAND
+ $ ISWROT, jbc, jgl, KBL, LKAHEAD, MVL, NBL,
+ $ NOTROT, p, PSKIPPED, q, ROWSKIP, SWBAND
LOGICAL APPLV, ROTOK, RSVEC
* ..
* .. Local Arrays ..
@@ -189,7 +189,7 @@
ELSE IF( ( RSVEC.OR.APPLV ) .AND. ( MV.LT.0 ) ) THEN
INFO = -8
ELSE IF( ( RSVEC.AND.( LDV.LT.N ) ).OR.
- & ( APPLV.AND.( LDV.LT.MV ) ) ) THEN
+ $ ( APPLV.AND.( LDV.LT.MV ) ) ) THEN
INFO = -10
ELSE IF( TOL.LE.EPS ) THEN
INFO = -13
@@ -282,7 +282,7 @@
IF( p.NE.q ) THEN
CALL SSWAP( M, A( 1, p ), 1, A( 1, q ), 1 )
IF( RSVEC )CALL SSWAP( MVL, V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, q ), 1 )
TEMP1 = SVA( p )
SVA( p ) = SVA( q )
SVA( q ) = TEMP1
@@ -306,7 +306,7 @@
* below should read "AAPP = SNRM2( M, A(1,p), 1 ) * D(p)".
*
IF( ( SVA( p ).LT.ROOTBIG ) .AND.
- + ( SVA( p ).GT.ROOTSFMIN ) ) THEN
+ $ ( SVA( p ).GT.ROOTSFMIN ) ) THEN
SVA( p ) = SNRM2( M, A( 1, p ), 1 )*D( p )
ELSE
TEMP1 = ZERO
@@ -335,27 +335,27 @@
ROTOK = ( SMALL*AAPP ).LE.AAQQ
IF( AAPP.LT.( BIG / AAQQ ) ) THEN
AAPQ = ( SDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL SCOPY( M, A( 1, p ), 1, WORK, 1 )
CALL SLASCL( 'G', 0, 0, AAPP, D( p ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = SDOT( M, WORK, 1, A( 1, q ),
- + 1 )*D( q ) / AAQQ
+ $ 1 )*D( q ) / AAQQ
END IF
ELSE
ROTOK = AAPP.LE.( AAQQ / SMALL )
IF( AAPP.GT.( SMALL / AAQQ ) ) THEN
AAPQ = ( SDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL SCOPY( M, A( 1, q ), 1, WORK, 1 )
CALL SLASCL( 'G', 0, 0, AAQQ, D( q ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = SDOT( M, WORK, 1, A( 1, p ),
- + 1 )*D( p ) / AAPP
+ $ 1 )*D( p ) / AAPP
END IF
END IF
*
@@ -386,15 +386,15 @@
FASTR( 3 ) = T*D( p ) / D( q )
FASTR( 4 ) = -T*D( q ) / D( p )
CALL SROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1, FASTR )
+ $ A( 1, q ), 1, FASTR )
IF( RSVEC )CALL SROTM( MVL,
- + V( 1, p ), 1,
- + V( 1, q ), 1,
- + FASTR )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1,
+ $ FASTR )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*SQRT( AMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, ABS( T ) )
*
ELSE
@@ -403,15 +403,15 @@
*
THSIGN = -SIGN( ONE, AAPQ )
T = ONE / ( THETA+THSIGN*
- + SQRT( ONE+THETA*THETA ) )
+ $ SQRT( ONE+THETA*THETA ) )
CS = SQRT( ONE / ( ONE+T*T ) )
SN = T*CS
*
MXSINJ = AMAX1( MXSINJ, ABS( SN ) )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*SQRT( AMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
*
APOAQ = D( p ) / D( q )
AQOAP = D( q ) / D( p )
@@ -422,87 +422,87 @@
D( p ) = D( p )*CS
D( q ) = D( q )*CS
CALL SROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1,
- + FASTR )
+ $ A( 1, q ), 1,
+ $ FASTR )
IF( RSVEC )CALL SROTM( MVL,
- + V( 1, p ), 1, V( 1, q ),
- + 1, FASTR )
+ $ V( 1, p ), 1, V( 1, q ),
+ $ 1, FASTR )
ELSE
CALL SAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL SAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
IF( RSVEC ) THEN
CALL SAXPY( MVL, -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL SAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
END IF
ELSE
IF( D( q ).GE.ONE ) THEN
CALL SAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL SAXPY( M, -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
IF( RSVEC ) THEN
CALL SAXPY( MVL, T*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
CALL SAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
ELSE
IF( D( p ).GE.D( q ) ) THEN
CALL SAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL SAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
IF( RSVEC ) THEN
CALL SAXPY( MVL,
- + -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -T*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL SAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
ELSE
CALL SAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL SAXPY( M,
- + -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
IF( RSVEC ) THEN
CALL SAXPY( MVL,
- + T*APOAQ, V( 1, p ),
- + 1, V( 1, q ), 1 )
+ $ T*APOAQ, V( 1, p ),
+ $ 1, V( 1, q ), 1 )
CALL SAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
END IF
END IF
@@ -513,16 +513,16 @@
* .. have to use modified Gram-Schmidt like transformation
CALL SCOPY( M, A( 1, p ), 1, WORK, 1 )
CALL SLASCL( 'G', 0, 0, AAPP, ONE, M,
- + 1, WORK, LDA, IERR )
+ $ 1, WORK, LDA, IERR )
CALL SLASCL( 'G', 0, 0, AAQQ, ONE, M,
- + 1, A( 1, q ), LDA, IERR )
+ $ 1, A( 1, q ), LDA, IERR )
TEMP1 = -AAPQ*D( p ) / D( q )
CALL SAXPY( M, TEMP1, WORK, 1,
- + A( 1, q ), 1 )
+ $ A( 1, q ), 1 )
CALL SLASCL( 'G', 0, 0, ONE, AAQQ, M,
- + 1, A( 1, q ), LDA, IERR )
+ $ 1, A( 1, q ), LDA, IERR )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, SFMIN )
END IF
* END IF ROTOK THEN ... ELSE
@@ -530,29 +530,29 @@
* In the case of cancellation in updating SVA(q), SVA(p)
* recompute SVA(q), SVA(p).
IF( ( SVA( q ) / AAQQ )**2.LE.ROOTEPS )
- + THEN
+ $ THEN
IF( ( AAQQ.LT.ROOTBIG ) .AND.
- + ( AAQQ.GT.ROOTSFMIN ) ) THEN
+ $ ( AAQQ.GT.ROOTSFMIN ) ) THEN
SVA( q ) = SNRM2( M, A( 1, q ), 1 )*
- + D( q )
+ $ D( q )
ELSE
T = ZERO
AAQQ = ONE
CALL SLASSQ( M, A( 1, q ), 1, T,
- + AAQQ )
+ $ AAQQ )
SVA( q ) = T*SQRT( AAQQ )*D( q )
END IF
END IF
IF( ( AAPP / AAPP0 ).LE.ROOTEPS ) THEN
IF( ( AAPP.LT.ROOTBIG ) .AND.
- + ( AAPP.GT.ROOTSFMIN ) ) THEN
+ $ ( AAPP.GT.ROOTSFMIN ) ) THEN
AAPP = SNRM2( M, A( 1, p ), 1 )*
- + D( p )
+ $ D( p )
ELSE
T = ZERO
AAPP = ONE
CALL SLASSQ( M, A( 1, p ), 1, T,
- + AAPP )
+ $ AAPP )
AAPP = T*SQRT( AAPP )*D( p )
END IF
SVA( p ) = AAPP
@@ -570,7 +570,7 @@
END IF
*
IF( ( i.LE.SWBAND ) .AND.
- + ( PSKIPPED.GT.ROWSKIP ) ) THEN
+ $ ( PSKIPPED.GT.ROWSKIP ) ) THEN
IF( ir1.EQ.0 )AAPP = -AAPP
NOTROT = 0
GO TO 2103
@@ -587,7 +587,7 @@
ELSE
SVA( p ) = AAPP
IF( ( ir1.EQ.0 ) .AND. ( AAPP.EQ.ZERO ) )
- + NOTROT = NOTROT + MIN0( igl+KBL-1, N ) - p
+ $ NOTROT = NOTROT + MIN0( igl+KBL-1, N ) - p
END IF
*
2001 CONTINUE
@@ -635,14 +635,14 @@
END IF
IF( AAPP.LT.( BIG / AAQQ ) ) THEN
AAPQ = ( SDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL SCOPY( M, A( 1, p ), 1, WORK, 1 )
CALL SLASCL( 'G', 0, 0, AAPP, D( p ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = SDOT( M, WORK, 1, A( 1, q ),
- + 1 )*D( q ) / AAQQ
+ $ 1 )*D( q ) / AAQQ
END IF
ELSE
IF( AAPP.GE.AAQQ ) THEN
@@ -652,14 +652,14 @@
END IF
IF( AAPP.GT.( SMALL / AAQQ ) ) THEN
AAPQ = ( SDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL SCOPY( M, A( 1, q ), 1, WORK, 1 )
CALL SLASCL( 'G', 0, 0, AAQQ, D( q ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = SDOT( M, WORK, 1, A( 1, p ),
- + 1 )*D( p ) / AAPP
+ $ 1 )*D( p ) / AAPP
END IF
END IF
*
@@ -685,15 +685,15 @@
FASTR( 3 ) = T*D( p ) / D( q )
FASTR( 4 ) = -T*D( q ) / D( p )
CALL SROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1, FASTR )
+ $ A( 1, q ), 1, FASTR )
IF( RSVEC )CALL SROTM( MVL,
- + V( 1, p ), 1,
- + V( 1, q ), 1,
- + FASTR )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1,
+ $ FASTR )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*SQRT( AMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, ABS( T ) )
ELSE
*
@@ -702,14 +702,14 @@
THSIGN = -SIGN( ONE, AAPQ )
IF( AAQQ.GT.AAPP0 )THSIGN = -THSIGN
T = ONE / ( THETA+THSIGN*
- + SQRT( ONE+THETA*THETA ) )
+ $ SQRT( ONE+THETA*THETA ) )
CS = SQRT( ONE / ( ONE+T*T ) )
SN = T*CS
MXSINJ = AMAX1( MXSINJ, ABS( SN ) )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*SQRT( AMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
*
APOAQ = D( p ) / D( q )
AQOAP = D( q ) / D( p )
@@ -721,26 +721,26 @@
D( p ) = D( p )*CS
D( q ) = D( q )*CS
CALL SROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1,
- + FASTR )
+ $ A( 1, q ), 1,
+ $ FASTR )
IF( RSVEC )CALL SROTM( MVL,
- + V( 1, p ), 1, V( 1, q ),
- + 1, FASTR )
+ $ V( 1, p ), 1, V( 1, q ),
+ $ 1, FASTR )
ELSE
CALL SAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL SAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
IF( RSVEC ) THEN
CALL SAXPY( MVL, -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL SAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
@@ -748,60 +748,60 @@
ELSE
IF( D( q ).GE.ONE ) THEN
CALL SAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL SAXPY( M, -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
IF( RSVEC ) THEN
CALL SAXPY( MVL, T*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
CALL SAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
ELSE
IF( D( p ).GE.D( q ) ) THEN
CALL SAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL SAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
IF( RSVEC ) THEN
CALL SAXPY( MVL,
- + -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -T*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL SAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
ELSE
CALL SAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL SAXPY( M,
- + -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
IF( RSVEC ) THEN
CALL SAXPY( MVL,
- + T*APOAQ, V( 1, p ),
- + 1, V( 1, q ), 1 )
+ $ T*APOAQ, V( 1, p ),
+ $ 1, V( 1, q ), 1 )
CALL SAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
END IF
END IF
@@ -811,37 +811,37 @@
ELSE
IF( AAPP.GT.AAQQ ) THEN
CALL SCOPY( M, A( 1, p ), 1, WORK,
- + 1 )
+ $ 1 )
CALL SLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
CALL SLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*D( p ) / D( q )
CALL SAXPY( M, TEMP1, WORK, 1,
- + A( 1, q ), 1 )
+ $ A( 1, q ), 1 )
CALL SLASCL( 'G', 0, 0, ONE, AAQQ,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, SFMIN )
ELSE
CALL SCOPY( M, A( 1, q ), 1, WORK,
- + 1 )
+ $ 1 )
CALL SLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
CALL SLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*D( q ) / D( p )
CALL SAXPY( M, TEMP1, WORK, 1,
- + A( 1, p ), 1 )
+ $ A( 1, p ), 1 )
CALL SLASCL( 'G', 0, 0, ONE, AAPP,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
SVA( p ) = AAPP*SQRT( AMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, SFMIN )
END IF
END IF
@@ -850,29 +850,29 @@
* In the case of cancellation in updating SVA(q)
* .. recompute SVA(q)
IF( ( SVA( q ) / AAQQ )**2.LE.ROOTEPS )
- + THEN
+ $ THEN
IF( ( AAQQ.LT.ROOTBIG ) .AND.
- + ( AAQQ.GT.ROOTSFMIN ) ) THEN
+ $ ( AAQQ.GT.ROOTSFMIN ) ) THEN
SVA( q ) = SNRM2( M, A( 1, q ), 1 )*
- + D( q )
+ $ D( q )
ELSE
T = ZERO
AAQQ = ONE
CALL SLASSQ( M, A( 1, q ), 1, T,
- + AAQQ )
+ $ AAQQ )
SVA( q ) = T*SQRT( AAQQ )*D( q )
END IF
END IF
IF( ( AAPP / AAPP0 )**2.LE.ROOTEPS ) THEN
IF( ( AAPP.LT.ROOTBIG ) .AND.
- + ( AAPP.GT.ROOTSFMIN ) ) THEN
+ $ ( AAPP.GT.ROOTSFMIN ) ) THEN
AAPP = SNRM2( M, A( 1, p ), 1 )*
- + D( p )
+ $ D( p )
ELSE
T = ZERO
AAPP = ONE
CALL SLASSQ( M, A( 1, p ), 1, T,
- + AAPP )
+ $ AAPP )
AAPP = T*SQRT( AAPP )*D( p )
END IF
SVA( p ) = AAPP
@@ -890,13 +890,13 @@
END IF
*
IF( ( i.LE.SWBAND ) .AND. ( IJBLSK.GE.BLSKIP ) )
- + THEN
+ $ THEN
SVA( p ) = AAPP
NOTROT = 0
GO TO 2011
END IF
IF( ( i.LE.SWBAND ) .AND.
- + ( PSKIPPED.GT.ROWSKIP ) ) THEN
+ $ ( PSKIPPED.GT.ROWSKIP ) ) THEN
AAPP = -AAPP
NOTROT = 0
GO TO 2203
@@ -910,7 +910,7 @@
*
ELSE
IF( AAPP.EQ.ZERO )NOTROT = NOTROT +
- + MIN0( jgl+KBL-1, N ) - jgl + 1
+ $ MIN0( jgl+KBL-1, N ) - jgl + 1
IF( AAPP.LT.ZERO )NOTROT = 0
END IF
@@ -929,7 +929,7 @@
*
* .. update SVA(N)
IF( ( SVA( N ).LT.ROOTBIG ) .AND. ( SVA( N ).GT.ROOTSFMIN ) )
- + THEN
+ $ THEN
SVA( N ) = SNRM2( M, A( 1, N ), 1 )*D( N )
ELSE
T = ZERO
@@ -941,10 +941,10 @@
* Additional steering devices
*
IF( ( i.LT.SWBAND ) .AND. ( ( MXAAPQ.LE.ROOTTOL ) .OR.
- + ( ISWROT.LE.N ) ) )SWBAND = i
+ $ ( ISWROT.LE.N ) ) )SWBAND = i
*
IF( ( i.GT.SWBAND+1 ) .AND. ( MXAAPQ.LT.FLOAT( N )*TOL ) .AND.
- + ( FLOAT( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
+ $ ( FLOAT( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
GO TO 1994
END IF
*
diff --git a/SRC/sgsvj1.f b/SRC/sgsvj1.f
index efa09248..bb36ea15 100644
--- a/SRC/sgsvj1.f
+++ b/SRC/sgsvj1.f
@@ -1,5 +1,5 @@
SUBROUTINE SGSVJ1( JOBV, M, N, N1, A, LDA, D, SVA, MV, V, LDV,
- + EPS, SFMIN, TOL, NSWEEP, WORK, LWORK, INFO )
+ $ EPS, SFMIN, TOL, NSWEEP, WORK, LWORK, INFO )
*
* -- LAPACK routine (version 3.3.0) --
*
@@ -24,7 +24,7 @@
* ..
* .. Array Arguments ..
REAL A( LDA, * ), D( N ), SVA( N ), V( LDV, * ),
- + WORK( LWORK )
+ $ WORK( LWORK )
* ..
*
* Purpose
@@ -44,12 +44,12 @@
* block-entries (tiles) of the (1,2) off-diagonal block are marked by the
* [x]'s in the following scheme:
*
-* | * * * [x] [x] [x]|
-* | * * * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks.
-* | * * * [x] [x] [x]| Row-cyclic pivoting inside each [x] block.
-* |[x] [x] [x] * * * |
-* |[x] [x] [x] * * * |
-* |[x] [x] [x] * * * |
+* | * C * [x] [x] [x]|
+* | * C * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks.
+* | * C * [x] [x] [x]| Row-cyclic pivoting inside each [x] block.
+* |[x] [x] [x] * C * |
+* |[x] [x] [x] * C * |
+* |[x] [x] [x] * C * |
*
* In terms of the columns of A, the first N1 columns are rotated 'against'
* the remaining N-N1 columns, trying to increase the angle between the
@@ -162,16 +162,16 @@
* .. Local Parameters ..
REAL ZERO, HALF, ONE, TWO
PARAMETER ( ZERO = 0.0E0, HALF = 0.5E0, ONE = 1.0E0,
- + TWO = 2.0E0 )
+ $ TWO = 2.0E0 )
* ..
* .. Local Scalars ..
REAL AAPP, AAPP0, AAPQ, AAQQ, APOAQ, AQOAP, BIG,
- + BIGTHETA, CS, LARGE, MXAAPQ, MXSINJ, ROOTBIG,
- + ROOTEPS, ROOTSFMIN, ROOTTOL, SMALL, SN, T,
- + TEMP1, THETA, THSIGN
+ $ BIGTHETA, CS, LARGE, MXAAPQ, MXSINJ, ROOTBIG,
+ $ ROOTEPS, ROOTSFMIN, ROOTTOL, SMALL, SN, T,
+ $ TEMP1, THETA, THSIGN
INTEGER BLSKIP, EMPTSW, i, ibr, igl, IERR, IJBLSK,
- + ISWROT, jbc, jgl, KBL, MVL, NOTROT, nblc, nblr,
- + p, PSKIPPED, q, ROWSKIP, SWBAND
+ $ ISWROT, jbc, jgl, KBL, MVL, NOTROT, nblc, nblr,
+ $ p, PSKIPPED, q, ROWSKIP, SWBAND
LOGICAL APPLV, ROTOK, RSVEC
* ..
* .. Local Arrays ..
@@ -208,7 +208,7 @@
ELSE IF( ( RSVEC.OR.APPLV ) .AND. ( MV.LT.0 ) ) THEN
INFO = -9
ELSE IF( ( RSVEC.AND.( LDV.LT.N ) ).OR.
- & ( APPLV.AND.( LDV.LT.MV ) ) ) THEN
+ $ ( APPLV.AND.( LDV.LT.MV ) ) ) THEN
INFO = -11
ELSE IF( TOL.LE.EPS ) THEN
INFO = -14
@@ -271,12 +271,12 @@
* Jacobi SVD algorithm SGESVJ.
*
*
-* | * * * [x] [x] [x]|
-* | * * * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks.
-* | * * * [x] [x] [x]| Row-cyclic pivoting inside each [x] block.
-* |[x] [x] [x] * * * |
-* |[x] [x] [x] * * * |
-* |[x] [x] [x] * * * |
+* | * C * [x] [x] [x]|
+* | * C * [x] [x] [x]| Row-cycling in the nblr-by-nblc [x] blocks.
+* | * C * [x] [x] [x]| Row-cyclic pivoting inside each [x] block.
+* |[x] [x] [x] * C * |
+* |[x] [x] [x] * C * |
+* |[x] [x] [x] * C * |
*
*
DO 1993 i = 1, NSWEEP
@@ -333,14 +333,14 @@
END IF
IF( AAPP.LT.( BIG / AAQQ ) ) THEN
AAPQ = ( SDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL SCOPY( M, A( 1, p ), 1, WORK, 1 )
CALL SLASCL( 'G', 0, 0, AAPP, D( p ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = SDOT( M, WORK, 1, A( 1, q ),
- + 1 )*D( q ) / AAQQ
+ $ 1 )*D( q ) / AAQQ
END IF
ELSE
IF( AAPP.GE.AAQQ ) THEN
@@ -350,14 +350,14 @@
END IF
IF( AAPP.GT.( SMALL / AAQQ ) ) THEN
AAPQ = ( SDOT( M, A( 1, p ), 1, A( 1,
- + q ), 1 )*D( p )*D( q ) / AAQQ )
- + / AAPP
+ $ q ), 1 )*D( p )*D( q ) / AAQQ )
+ $ / AAPP
ELSE
CALL SCOPY( M, A( 1, q ), 1, WORK, 1 )
CALL SLASCL( 'G', 0, 0, AAQQ, D( q ),
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
AAPQ = SDOT( M, WORK, 1, A( 1, p ),
- + 1 )*D( p ) / AAPP
+ $ 1 )*D( p ) / AAPP
END IF
END IF
@@ -383,15 +383,15 @@
FASTR( 3 ) = T*D( p ) / D( q )
FASTR( 4 ) = -T*D( q ) / D( p )
CALL SROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1, FASTR )
+ $ A( 1, q ), 1, FASTR )
IF( RSVEC )CALL SROTM( MVL,
- + V( 1, p ), 1,
- + V( 1, q ), 1,
- + FASTR )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1,
+ $ FASTR )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*SQRT( AMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, ABS( T ) )
ELSE
*
@@ -400,14 +400,14 @@
THSIGN = -SIGN( ONE, AAPQ )
IF( AAQQ.GT.AAPP0 )THSIGN = -THSIGN
T = ONE / ( THETA+THSIGN*
- + SQRT( ONE+THETA*THETA ) )
+ $ SQRT( ONE+THETA*THETA ) )
CS = SQRT( ONE / ( ONE+T*T ) )
SN = T*CS
MXSINJ = AMAX1( MXSINJ, ABS( SN ) )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE+T*APOAQ*AAPQ ) )
+ $ ONE+T*APOAQ*AAPQ ) )
AAPP = AAPP*SQRT( AMAX1( ZERO,
- + ONE-T*AQOAP*AAPQ ) )
+ $ ONE-T*AQOAP*AAPQ ) )
APOAQ = D( p ) / D( q )
AQOAP = D( q ) / D( p )
@@ -419,26 +419,26 @@
D( p ) = D( p )*CS
D( q ) = D( q )*CS
CALL SROTM( M, A( 1, p ), 1,
- + A( 1, q ), 1,
- + FASTR )
+ $ A( 1, q ), 1,
+ $ FASTR )
IF( RSVEC )CALL SROTM( MVL,
- + V( 1, p ), 1, V( 1, q ),
- + 1, FASTR )
+ $ V( 1, p ), 1, V( 1, q ),
+ $ 1, FASTR )
ELSE
CALL SAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL SAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
IF( RSVEC ) THEN
CALL SAXPY( MVL, -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL SAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
@@ -446,60 +446,60 @@
ELSE
IF( D( q ).GE.ONE ) THEN
CALL SAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL SAXPY( M, -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
IF( RSVEC ) THEN
CALL SAXPY( MVL, T*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
CALL SAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
ELSE
IF( D( p ).GE.D( q ) ) THEN
CALL SAXPY( M, -T*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
CALL SAXPY( M, CS*SN*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
D( p ) = D( p )*CS
D( q ) = D( q ) / CS
IF( RSVEC ) THEN
CALL SAXPY( MVL,
- + -T*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -T*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
CALL SAXPY( MVL,
- + CS*SN*APOAQ,
- + V( 1, p ), 1,
- + V( 1, q ), 1 )
+ $ CS*SN*APOAQ,
+ $ V( 1, p ), 1,
+ $ V( 1, q ), 1 )
END IF
ELSE
CALL SAXPY( M, T*APOAQ,
- + A( 1, p ), 1,
- + A( 1, q ), 1 )
+ $ A( 1, p ), 1,
+ $ A( 1, q ), 1 )
CALL SAXPY( M,
- + -CS*SN*AQOAP,
- + A( 1, q ), 1,
- + A( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ A( 1, q ), 1,
+ $ A( 1, p ), 1 )
D( p ) = D( p ) / CS
D( q ) = D( q )*CS
IF( RSVEC ) THEN
CALL SAXPY( MVL,
- + T*APOAQ, V( 1, p ),
- + 1, V( 1, q ), 1 )
+ $ T*APOAQ, V( 1, p ),
+ $ 1, V( 1, q ), 1 )
CALL SAXPY( MVL,
- + -CS*SN*AQOAP,
- + V( 1, q ), 1,
- + V( 1, p ), 1 )
+ $ -CS*SN*AQOAP,
+ $ V( 1, q ), 1,
+ $ V( 1, p ), 1 )
END IF
END IF
END IF
@@ -509,37 +509,37 @@
ELSE
IF( AAPP.GT.AAQQ ) THEN
CALL SCOPY( M, A( 1, p ), 1, WORK,
- + 1 )
+ $ 1 )
CALL SLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
CALL SLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*D( p ) / D( q )
CALL SAXPY( M, TEMP1, WORK, 1,
- + A( 1, q ), 1 )
+ $ A( 1, q ), 1 )
CALL SLASCL( 'G', 0, 0, ONE, AAQQ,
- + M, 1, A( 1, q ), LDA,
- + IERR )
+ $ M, 1, A( 1, q ), LDA,
+ $ IERR )
SVA( q ) = AAQQ*SQRT( AMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, SFMIN )
ELSE
CALL SCOPY( M, A( 1, q ), 1, WORK,
- + 1 )
+ $ 1 )
CALL SLASCL( 'G', 0, 0, AAQQ, ONE,
- + M, 1, WORK, LDA, IERR )
+ $ M, 1, WORK, LDA, IERR )
CALL SLASCL( 'G', 0, 0, AAPP, ONE,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
TEMP1 = -AAPQ*D( q ) / D( p )
CALL SAXPY( M, TEMP1, WORK, 1,
- + A( 1, p ), 1 )
+ $ A( 1, p ), 1 )
CALL SLASCL( 'G', 0, 0, ONE, AAPP,
- + M, 1, A( 1, p ), LDA,
- + IERR )
+ $ M, 1, A( 1, p ), LDA,
+ $ IERR )
SVA( p ) = AAPP*SQRT( AMAX1( ZERO,
- + ONE-AAPQ*AAPQ ) )
+ $ ONE-AAPQ*AAPQ ) )
MXSINJ = AMAX1( MXSINJ, SFMIN )
END IF
END IF
@@ -548,29 +548,29 @@
* In the case of cancellation in updating SVA(q)
* .. recompute SVA(q)
IF( ( SVA( q ) / AAQQ )**2.LE.ROOTEPS )
- + THEN
+ $ THEN
IF( ( AAQQ.LT.ROOTBIG ) .AND.
- + ( AAQQ.GT.ROOTSFMIN ) ) THEN
+ $ ( AAQQ.GT.ROOTSFMIN ) ) THEN
SVA( q ) = SNRM2( M, A( 1, q ), 1 )*
- + D( q )
+ $ D( q )
ELSE
T = ZERO
AAQQ = ONE
CALL SLASSQ( M, A( 1, q ), 1, T,
- + AAQQ )
+ $ AAQQ )
SVA( q ) = T*SQRT( AAQQ )*D( q )
END IF
END IF
IF( ( AAPP / AAPP0 )**2.LE.ROOTEPS ) THEN
IF( ( AAPP.LT.ROOTBIG ) .AND.
- + ( AAPP.GT.ROOTSFMIN ) ) THEN
+ $ ( AAPP.GT.ROOTSFMIN ) ) THEN
AAPP = SNRM2( M, A( 1, p ), 1 )*
- + D( p )
+ $ D( p )
ELSE
T = ZERO
AAPP = ONE
CALL SLASSQ( M, A( 1, p ), 1, T,
- + AAPP )
+ $ AAPP )
AAPP = T*SQRT( AAPP )*D( p )
END IF
SVA( p ) = AAPP
@@ -590,13 +590,13 @@
* IF ( NOTROT .GE. EMPTSW ) GO TO 2011
IF( ( i.LE.SWBAND ) .AND. ( IJBLSK.GE.BLSKIP ) )
- + THEN
+ $ THEN
SVA( p ) = AAPP
NOTROT = 0
GO TO 2011
END IF
IF( ( i.LE.SWBAND ) .AND.
- + ( PSKIPPED.GT.ROWSKIP ) ) THEN
+ $ ( PSKIPPED.GT.ROWSKIP ) ) THEN
AAPP = -AAPP
NOTROT = 0
GO TO 2203
@@ -611,7 +611,7 @@
*
ELSE
IF( AAPP.EQ.ZERO )NOTROT = NOTROT +
- + MIN0( jgl+KBL-1, N ) - jgl + 1
+ $ MIN0( jgl+KBL-1, N ) - jgl + 1
IF( AAPP.LT.ZERO )NOTROT = 0
*** IF ( NOTROT .GE. EMPTSW ) GO TO 2011
END IF
@@ -631,7 +631,7 @@
*
* .. update SVA(N)
IF( ( SVA( N ).LT.ROOTBIG ) .AND. ( SVA( N ).GT.ROOTSFMIN ) )
- + THEN
+ $ THEN
SVA( N ) = SNRM2( M, A( 1, N ), 1 )*D( N )
ELSE
T = ZERO
@@ -643,10 +643,10 @@
* Additional steering devices
*
IF( ( i.LT.SWBAND ) .AND. ( ( MXAAPQ.LE.ROOTTOL ) .OR.
- + ( ISWROT.LE.N ) ) )SWBAND = i
+ $ ( ISWROT.LE.N ) ) )SWBAND = i
IF( ( i.GT.SWBAND+1 ) .AND. ( MXAAPQ.LT.FLOAT( N )*TOL ) .AND.
- + ( FLOAT( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
+ $ ( FLOAT( N )*MXAAPQ*MXSINJ.LT.TOL ) ) THEN
GO TO 1994
END IF
diff --git a/SRC/slag2d.f b/SRC/slag2d.f
index 39b09d6e..888b3181 100644
--- a/SRC/slag2d.f
+++ b/SRC/slag2d.f
@@ -49,7 +49,8 @@
*
* INFO (output) INTEGER
* = 0: successful exit
-* =========
+*
+* =====================================================================
*
* .. Local Scalars ..
INTEGER I, J
diff --git a/SRC/slansf.f b/SRC/slansf.f
index 6d1373b7..8ec70f3c 100644
--- a/SRC/slansf.f
+++ b/SRC/slansf.f
@@ -194,19 +194,19 @@
*
NOE = 1
IF( MOD( N, 2 ).EQ.0 )
- + NOE = 0
+ $ NOE = 0
*
* set ifm = 0 when form='T or 't' and 1 otherwise
*
IFM = 1
IF( LSAME( TRANSR, 'T' ) )
- + IFM = 0
+ $ IFM = 0
*
* set ilu = 0 when uplo='U or 'u' and 1 otherwise
*
ILU = 1
IF( LSAME( UPLO, 'U' ) )
- + ILU = 0
+ $ ILU = 0
*
* set lda = (n+1)/2 when ifm = 0
* set lda = n when ifm = 1 and noe = 1
@@ -266,7 +266,7 @@
END IF
END IF
ELSE IF( ( LSAME( NORM, 'I' ) ) .OR. ( LSAME( NORM, 'O' ) ) .OR.
- + ( NORM.EQ.'1' ) ) THEN
+ $ ( NORM.EQ.'1' ) ) THEN
*
* Find normI(A) ( = norm1(A), since A is symmetric).
*
@@ -290,7 +290,7 @@
* -> A(j+k,j+k)
WORK( J+K ) = S + AA
IF( I.EQ.K+K )
- + GO TO 10
+ $ GO TO 10
I = I + 1
AA = ABS( A( I+J*LDA ) )
* -> A(j,j)
@@ -736,7 +736,7 @@
END DO
DO J = 0, K - 2
CALL SLASSQ( K-J-1, A( J+1+( J+K-1 )*LDA ), 1,
- + SCALE, S )
+ $ SCALE, S )
* L at A(0,k-1)
END DO
S = S + S
@@ -818,7 +818,7 @@
END DO
DO J = 0, K - 2
CALL SLASSQ( K-J-1, A( J+1+( J+K )*LDA ), 1, SCALE,
- + S )
+ $ S )
* L at A(0,k)
END DO
S = S + S
diff --git a/SRC/slarrv.f b/SRC/slarrv.f
index b24e5f8c..a59cd945 100644
--- a/SRC/slarrv.f
+++ b/SRC/slarrv.f
@@ -332,7 +332,7 @@
* high relative accuracy is required for the computation of the
* corresponding eigenvectors.
CALL SCOPY( IM, W( WBEGIN ), 1,
- & WORK( WBEGIN ), 1 )
+ $ WORK( WBEGIN ), 1 )
* We store in W the eigenvalue approximations w.r.t. the original
* matrix T.
@@ -431,7 +431,7 @@
Q = INDEXW( WBEGIN-1+OLDLST )
* Offset for the arrays WORK, WGAP and WERR, i.e., the P-OFFSET
* through the Q-OFFSET elements of these arrays are to be used.
-C OFFSET = P-OLDFST
+* OFFSET = P-OLDFST
OFFSET = INDEXW( WBEGIN ) - 1
* perform limited bisection (if necessary) to get approximate
* eigenvalues to the precision needed.
@@ -570,7 +570,7 @@ C OFFSET = P-OLDFST
* Compute RRR of child cluster.
* Note that the new RRR is stored in Z
*
-C SLARRF needs LWORK = 2*N
+* SLARRF needs LWORK = 2*N
CALL SLARRF( IN, D( IBEGIN ), L( IBEGIN ),
$ WORK(INDLD+IBEGIN-1),
$ NEWFST, NEWLST, WORK(WBEGIN),
diff --git a/SRC/slasq4.f b/SRC/slasq4.f
index bf378f81..65324e68 100644
--- a/SRC/slasq4.f
+++ b/SRC/slasq4.f
@@ -25,6 +25,9 @@
* SLASQ4 computes an approximation TAU to the smallest eigenvalue
* using values of d from the previous transform.
*
+* Arguments
+* =========
+*
* I0 (input) INTEGER
* First index.
*
diff --git a/SRC/spftrf.f b/SRC/spftrf.f
index 47f2ca69..83ce11c1 100644
--- a/SRC/spftrf.f
+++ b/SRC/spftrf.f
@@ -192,7 +192,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -232,14 +232,14 @@
*
CALL SPOTRF( 'L', N1, A( 0 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRSM( 'R', 'L', 'T', 'N', N2, N1, ONE, A( 0 ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL SSYRK( 'U', 'N', N2, N1, -ONE, A( N1 ), N, ONE,
- + A( N ), N )
+ $ A( N ), N )
CALL SPOTRF( 'U', N2, A( N ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
ELSE
*
@@ -249,14 +249,14 @@
*
CALL SPOTRF( 'L', N1, A( N2 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRSM( 'L', 'L', 'N', 'N', N1, N2, ONE, A( N2 ), N,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL SSYRK( 'U', 'T', N2, N1, -ONE, A( 0 ), N, ONE,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL SPOTRF( 'U', N2, A( N1 ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
END IF
*
@@ -272,14 +272,14 @@
*
CALL SPOTRF( 'U', N1, A( 0 ), N1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRSM( 'L', 'U', 'T', 'N', N1, N2, ONE, A( 0 ), N1,
- + A( N1*N1 ), N1 )
+ $ A( N1*N1 ), N1 )
CALL SSYRK( 'L', 'T', N2, N1, -ONE, A( N1*N1 ), N1, ONE,
- + A( 1 ), N1 )
+ $ A( 1 ), N1 )
CALL SPOTRF( 'L', N2, A( 1 ), N1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
ELSE
*
@@ -289,14 +289,14 @@
*
CALL SPOTRF( 'U', N1, A( N2*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRSM( 'R', 'U', 'N', 'N', N2, N1, ONE, A( N2*N2 ),
- + N2, A( 0 ), N2 )
+ $ N2, A( 0 ), N2 )
CALL SSYRK( 'L', 'N', N2, N1, -ONE, A( 0 ), N2, ONE,
- + A( N1*N2 ), N2 )
+ $ A( N1*N2 ), N2 )
CALL SPOTRF( 'L', N2, A( N1*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
END IF
*
@@ -318,14 +318,14 @@
*
CALL SPOTRF( 'L', K, A( 1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRSM( 'R', 'L', 'T', 'N', K, K, ONE, A( 1 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL SSYRK( 'U', 'N', K, K, -ONE, A( K+1 ), N+1, ONE,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
CALL SPOTRF( 'U', K, A( 0 ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
ELSE
*
@@ -335,14 +335,14 @@
*
CALL SPOTRF( 'L', K, A( K+1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRSM( 'L', 'L', 'N', 'N', K, K, ONE, A( K+1 ),
- + N+1, A( 0 ), N+1 )
+ $ N+1, A( 0 ), N+1 )
CALL SSYRK( 'U', 'T', K, K, -ONE, A( 0 ), N+1, ONE,
- + A( K ), N+1 )
+ $ A( K ), N+1 )
CALL SPOTRF( 'U', K, A( K ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
END IF
*
@@ -358,14 +358,14 @@
*
CALL SPOTRF( 'U', K, A( 0+K ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRSM( 'L', 'U', 'T', 'N', K, K, ONE, A( K ), N1,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL SSYRK( 'L', 'T', K, K, -ONE, A( K*( K+1 ) ), K, ONE,
- + A( 0 ), K )
+ $ A( 0 ), K )
CALL SPOTRF( 'L', K, A( 0 ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
ELSE
*
@@ -375,14 +375,14 @@
*
CALL SPOTRF( 'U', K, A( K*( K+1 ) ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRSM( 'R', 'U', 'N', 'N', K, K, ONE,
- + A( K*( K+1 ) ), K, A( 0 ), K )
+ $ A( K*( K+1 ) ), K, A( 0 ), K )
CALL SSYRK( 'L', 'N', K, K, -ONE, A( 0 ), K, ONE,
- + A( K*K ), K )
+ $ A( K*K ), K )
CALL SPOTRF( 'L', K, A( K*K ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
END IF
*
diff --git a/SRC/spftri.f b/SRC/spftri.f
index 14b6a5fc..16f7a5cf 100644
--- a/SRC/spftri.f
+++ b/SRC/spftri.f
@@ -184,13 +184,13 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* Invert the triangular Cholesky factor U or L.
*
CALL STFTRI( TRANSR, UPLO, 'N', N, A, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -231,9 +231,9 @@
*
CALL SLAUUM( 'L', N1, A( 0 ), N, INFO )
CALL SSYRK( 'L', 'T', N1, N2, ONE, A( N1 ), N, ONE,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL STRMM( 'L', 'U', 'N', 'N', N2, N1, ONE, A( N ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL SLAUUM( 'U', N2, A( N ), N, INFO )
*
ELSE
@@ -244,9 +244,9 @@
*
CALL SLAUUM( 'L', N1, A( N2 ), N, INFO )
CALL SSYRK( 'L', 'N', N1, N2, ONE, A( 0 ), N, ONE,
- + A( N2 ), N )
+ $ A( N2 ), N )
CALL STRMM( 'R', 'U', 'T', 'N', N1, N2, ONE, A( N1 ), N,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL SLAUUM( 'U', N2, A( N1 ), N, INFO )
*
END IF
@@ -262,9 +262,9 @@
*
CALL SLAUUM( 'U', N1, A( 0 ), N1, INFO )
CALL SSYRK( 'U', 'N', N1, N2, ONE, A( N1*N1 ), N1, ONE,
- + A( 0 ), N1 )
+ $ A( 0 ), N1 )
CALL STRMM( 'R', 'L', 'N', 'N', N1, N2, ONE, A( 1 ), N1,
- + A( N1*N1 ), N1 )
+ $ A( N1*N1 ), N1 )
CALL SLAUUM( 'L', N2, A( 1 ), N1, INFO )
*
ELSE
@@ -274,9 +274,9 @@
*
CALL SLAUUM( 'U', N1, A( N2*N2 ), N2, INFO )
CALL SSYRK( 'U', 'T', N1, N2, ONE, A( 0 ), N2, ONE,
- + A( N2*N2 ), N2 )
+ $ A( N2*N2 ), N2 )
CALL STRMM( 'L', 'L', 'T', 'N', N2, N1, ONE, A( N1*N2 ),
- + N2, A( 0 ), N2 )
+ $ N2, A( 0 ), N2 )
CALL SLAUUM( 'L', N2, A( N1*N2 ), N2, INFO )
*
END IF
@@ -299,9 +299,9 @@
*
CALL SLAUUM( 'L', K, A( 1 ), N+1, INFO )
CALL SSYRK( 'L', 'T', K, K, ONE, A( K+1 ), N+1, ONE,
- + A( 1 ), N+1 )
+ $ A( 1 ), N+1 )
CALL STRMM( 'L', 'U', 'N', 'N', K, K, ONE, A( 0 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL SLAUUM( 'U', K, A( 0 ), N+1, INFO )
*
ELSE
@@ -312,9 +312,9 @@
*
CALL SLAUUM( 'L', K, A( K+1 ), N+1, INFO )
CALL SSYRK( 'L', 'N', K, K, ONE, A( 0 ), N+1, ONE,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL STRMM( 'R', 'U', 'T', 'N', K, K, ONE, A( K ), N+1,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
CALL SLAUUM( 'U', K, A( K ), N+1, INFO )
*
END IF
@@ -331,9 +331,9 @@
*
CALL SLAUUM( 'U', K, A( K ), K, INFO )
CALL SSYRK( 'U', 'N', K, K, ONE, A( K*( K+1 ) ), K, ONE,
- + A( K ), K )
+ $ A( K ), K )
CALL STRMM( 'R', 'L', 'N', 'N', K, K, ONE, A( 0 ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL SLAUUM( 'L', K, A( 0 ), K, INFO )
*
ELSE
@@ -344,9 +344,9 @@
*
CALL SLAUUM( 'U', K, A( K*( K+1 ) ), K, INFO )
CALL SSYRK( 'U', 'T', K, K, ONE, A( 0 ), K, ONE,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL STRMM( 'L', 'L', 'T', 'N', K, K, ONE, A( K*K ), K,
- + A( 0 ), K )
+ $ A( 0 ), K )
CALL SLAUUM( 'L', K, A( K*K ), K, INFO )
*
END IF
diff --git a/SRC/spftrs.f b/SRC/spftrs.f
index b01a4bb4..6b77dbdd 100644
--- a/SRC/spftrs.f
+++ b/SRC/spftrs.f
@@ -186,20 +186,20 @@
* Quick return if possible
*
IF( N.EQ.0 .OR. NRHS.EQ.0 )
- + RETURN
+ $ RETURN
*
* start execution: there are two triangular solves
*
IF( LOWER ) THEN
CALL STFSM( TRANSR, 'L', UPLO, 'N', 'N', N, NRHS, ONE, A, B,
- + LDB )
+ $ LDB )
CALL STFSM( TRANSR, 'L', UPLO, 'T', 'N', N, NRHS, ONE, A, B,
- + LDB )
+ $ LDB )
ELSE
CALL STFSM( TRANSR, 'L', UPLO, 'T', 'N', N, NRHS, ONE, A, B,
- + LDB )
+ $ LDB )
CALL STFSM( TRANSR, 'L', UPLO, 'N', 'N', N, NRHS, ONE, A, B,
- + LDB )
+ $ LDB )
END IF
*
RETURN
diff --git a/SRC/ssfrk.f b/SRC/ssfrk.f
index dfdc1fdd..956e1755 100644
--- a/SRC/ssfrk.f
+++ b/SRC/ssfrk.f
@@ -1,5 +1,5 @@
SUBROUTINE SSFRK( TRANSR, UPLO, TRANS, N, K, ALPHA, A, LDA, BETA,
- + C )
+ $ C )
*
* -- LAPACK routine (version 3.3.0) --
*
@@ -106,10 +106,8 @@
* NT = N*(N+1)/2. On entry, the symmetric matrix C in RFP
* Format. RFP Format is described by TRANSR, UPLO and N.
*
-* Arguments
-* ==========
+* =====================================================================
*
-* ..
* .. Parameters ..
REAL ONE, ZERO
PARAMETER ( ONE = 1.0E+0, ZERO = 0.0E+0 )
@@ -167,7 +165,7 @@
* done (it is in SSYRK for example) and left in the general case.
*
IF( ( N.EQ.0 ) .OR. ( ( ( ALPHA.EQ.ZERO ) .OR. ( K.EQ.0 ) ) .AND.
- + ( BETA.EQ.ONE ) ) )RETURN
+ $ ( BETA.EQ.ONE ) ) )RETURN
*
IF( ( ALPHA.EQ.ZERO ) .AND. ( BETA.EQ.ZERO ) ) THEN
DO J = 1, ( ( N*( N+1 ) ) / 2 )
@@ -211,22 +209,22 @@
* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'N'
*
CALL SSYRK( 'L', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N )
+ $ BETA, C( 1 ), N )
CALL SSYRK( 'U', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( N+1 ), N )
+ $ BETA, C( N+1 ), N )
CALL SGEMM( 'N', 'T', N2, N1, K, ALPHA, A( N1+1, 1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( N1+1 ), N )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( N1+1 ), N )
*
ELSE
*
* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'T'
*
CALL SSYRK( 'L', 'T', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N )
+ $ BETA, C( 1 ), N )
CALL SSYRK( 'U', 'T', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( N+1 ), N )
+ $ BETA, C( N+1 ), N )
CALL SGEMM( 'T', 'N', N2, N1, K, ALPHA, A( 1, N1+1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( N1+1 ), N )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( N1+1 ), N )
*
END IF
*
@@ -239,22 +237,22 @@
* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'N'
*
CALL SSYRK( 'L', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2+1 ), N )
+ $ BETA, C( N2+1 ), N )
CALL SSYRK( 'U', 'N', N2, K, ALPHA, A( N2, 1 ), LDA,
- + BETA, C( N1+1 ), N )
+ $ BETA, C( N1+1 ), N )
CALL SGEMM( 'N', 'T', N1, N2, K, ALPHA, A( 1, 1 ),
- + LDA, A( N2, 1 ), LDA, BETA, C( 1 ), N )
+ $ LDA, A( N2, 1 ), LDA, BETA, C( 1 ), N )
*
ELSE
*
* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'T'
*
CALL SSYRK( 'L', 'T', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2+1 ), N )
+ $ BETA, C( N2+1 ), N )
CALL SSYRK( 'U', 'T', N2, K, ALPHA, A( 1, N2 ), LDA,
- + BETA, C( N1+1 ), N )
+ $ BETA, C( N1+1 ), N )
CALL SGEMM( 'T', 'N', N1, N2, K, ALPHA, A( 1, 1 ),
- + LDA, A( 1, N2 ), LDA, BETA, C( 1 ), N )
+ $ LDA, A( 1, N2 ), LDA, BETA, C( 1 ), N )
*
END IF
*
@@ -273,24 +271,24 @@
* N is odd, TRANSR = 'T', UPLO = 'L', and TRANS = 'N'
*
CALL SSYRK( 'U', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N1 )
+ $ BETA, C( 1 ), N1 )
CALL SSYRK( 'L', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( 2 ), N1 )
+ $ BETA, C( 2 ), N1 )
CALL SGEMM( 'N', 'T', N1, N2, K, ALPHA, A( 1, 1 ),
- + LDA, A( N1+1, 1 ), LDA, BETA,
- + C( N1*N1+1 ), N1 )
+ $ LDA, A( N1+1, 1 ), LDA, BETA,
+ $ C( N1*N1+1 ), N1 )
*
ELSE
*
* N is odd, TRANSR = 'T', UPLO = 'L', and TRANS = 'T'
*
CALL SSYRK( 'U', 'T', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N1 )
+ $ BETA, C( 1 ), N1 )
CALL SSYRK( 'L', 'T', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( 2 ), N1 )
+ $ BETA, C( 2 ), N1 )
CALL SGEMM( 'T', 'N', N1, N2, K, ALPHA, A( 1, 1 ),
- + LDA, A( 1, N1+1 ), LDA, BETA,
- + C( N1*N1+1 ), N1 )
+ $ LDA, A( 1, N1+1 ), LDA, BETA,
+ $ C( N1*N1+1 ), N1 )
*
END IF
*
@@ -303,22 +301,22 @@
* N is odd, TRANSR = 'T', UPLO = 'U', and TRANS = 'N'
*
CALL SSYRK( 'U', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2*N2+1 ), N2 )
+ $ BETA, C( N2*N2+1 ), N2 )
CALL SSYRK( 'L', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( N1*N2+1 ), N2 )
+ $ BETA, C( N1*N2+1 ), N2 )
CALL SGEMM( 'N', 'T', N2, N1, K, ALPHA, A( N1+1, 1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( 1 ), N2 )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( 1 ), N2 )
*
ELSE
*
* N is odd, TRANSR = 'T', UPLO = 'U', and TRANS = 'T'
*
CALL SSYRK( 'U', 'T', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2*N2+1 ), N2 )
+ $ BETA, C( N2*N2+1 ), N2 )
CALL SSYRK( 'L', 'T', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( N1*N2+1 ), N2 )
+ $ BETA, C( N1*N2+1 ), N2 )
CALL SGEMM( 'T', 'N', N2, N1, K, ALPHA, A( 1, N1+1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( 1 ), N2 )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( 1 ), N2 )
*
END IF
*
@@ -343,24 +341,24 @@
* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'N'
*
CALL SSYRK( 'L', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 2 ), N+1 )
+ $ BETA, C( 2 ), N+1 )
CALL SSYRK( 'U', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( 1 ), N+1 )
+ $ BETA, C( 1 ), N+1 )
CALL SGEMM( 'N', 'T', NK, NK, K, ALPHA, A( NK+1, 1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( NK+2 ),
- + N+1 )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( NK+2 ),
+ $ N+1 )
*
ELSE
*
* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'T'
*
CALL SSYRK( 'L', 'T', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 2 ), N+1 )
+ $ BETA, C( 2 ), N+1 )
CALL SSYRK( 'U', 'T', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( 1 ), N+1 )
+ $ BETA, C( 1 ), N+1 )
CALL SGEMM( 'T', 'N', NK, NK, K, ALPHA, A( 1, NK+1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( NK+2 ),
- + N+1 )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( NK+2 ),
+ $ N+1 )
*
END IF
*
@@ -373,24 +371,24 @@
* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'N'
*
CALL SSYRK( 'L', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+2 ), N+1 )
+ $ BETA, C( NK+2 ), N+1 )
CALL SSYRK( 'U', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( NK+1 ), N+1 )
+ $ BETA, C( NK+1 ), N+1 )
CALL SGEMM( 'N', 'T', NK, NK, K, ALPHA, A( 1, 1 ),
- + LDA, A( NK+1, 1 ), LDA, BETA, C( 1 ),
- + N+1 )
+ $ LDA, A( NK+1, 1 ), LDA, BETA, C( 1 ),
+ $ N+1 )
*
ELSE
*
* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'T'
*
CALL SSYRK( 'L', 'T', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+2 ), N+1 )
+ $ BETA, C( NK+2 ), N+1 )
CALL SSYRK( 'U', 'T', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( NK+1 ), N+1 )
+ $ BETA, C( NK+1 ), N+1 )
CALL SGEMM( 'T', 'N', NK, NK, K, ALPHA, A( 1, 1 ),
- + LDA, A( 1, NK+1 ), LDA, BETA, C( 1 ),
- + N+1 )
+ $ LDA, A( 1, NK+1 ), LDA, BETA, C( 1 ),
+ $ N+1 )
*
END IF
*
@@ -409,24 +407,24 @@
* N is even, TRANSR = 'T', UPLO = 'L', and TRANS = 'N'
*
CALL SSYRK( 'U', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+1 ), NK )
+ $ BETA, C( NK+1 ), NK )
CALL SSYRK( 'L', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( 1 ), NK )
+ $ BETA, C( 1 ), NK )
CALL SGEMM( 'N', 'T', NK, NK, K, ALPHA, A( 1, 1 ),
- + LDA, A( NK+1, 1 ), LDA, BETA,
- + C( ( ( NK+1 )*NK )+1 ), NK )
+ $ LDA, A( NK+1, 1 ), LDA, BETA,
+ $ C( ( ( NK+1 )*NK )+1 ), NK )
*
ELSE
*
* N is even, TRANSR = 'T', UPLO = 'L', and TRANS = 'T'
*
CALL SSYRK( 'U', 'T', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+1 ), NK )
+ $ BETA, C( NK+1 ), NK )
CALL SSYRK( 'L', 'T', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( 1 ), NK )
+ $ BETA, C( 1 ), NK )
CALL SGEMM( 'T', 'N', NK, NK, K, ALPHA, A( 1, 1 ),
- + LDA, A( 1, NK+1 ), LDA, BETA,
- + C( ( ( NK+1 )*NK )+1 ), NK )
+ $ LDA, A( 1, NK+1 ), LDA, BETA,
+ $ C( ( ( NK+1 )*NK )+1 ), NK )
*
END IF
*
@@ -439,22 +437,22 @@
* N is even, TRANSR = 'T', UPLO = 'U', and TRANS = 'N'
*
CALL SSYRK( 'U', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK*( NK+1 )+1 ), NK )
+ $ BETA, C( NK*( NK+1 )+1 ), NK )
CALL SSYRK( 'L', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( NK*NK+1 ), NK )
+ $ BETA, C( NK*NK+1 ), NK )
CALL SGEMM( 'N', 'T', NK, NK, K, ALPHA, A( NK+1, 1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( 1 ), NK )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( 1 ), NK )
*
ELSE
*
* N is even, TRANSR = 'T', UPLO = 'U', and TRANS = 'T'
*
CALL SSYRK( 'U', 'T', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK*( NK+1 )+1 ), NK )
+ $ BETA, C( NK*( NK+1 )+1 ), NK )
CALL SSYRK( 'L', 'T', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( NK*NK+1 ), NK )
+ $ BETA, C( NK*NK+1 ), NK )
CALL SGEMM( 'T', 'N', NK, NK, K, ALPHA, A( 1, NK+1 ),
- + LDA, A( 1, 1 ), LDA, BETA, C( 1 ), NK )
+ $ LDA, A( 1, 1 ), LDA, BETA, C( 1 ), NK )
*
END IF
*
diff --git a/SRC/stfsm.f b/SRC/stfsm.f
index 94a4ab02..3eee1f26 100644
--- a/SRC/stfsm.f
+++ b/SRC/stfsm.f
@@ -1,5 +1,5 @@
SUBROUTINE STFSM( TRANSR, SIDE, UPLO, TRANS, DIAG, M, N, ALPHA, A,
- + B, LDB )
+ $ B, LDB )
*
* -- LAPACK routine (version 3.3.0) --
*
@@ -222,7 +222,7 @@
* ..
* .. Local Scalars ..
LOGICAL LOWER, LSIDE, MISODD, NISODD, NORMALTRANSR,
- + NOTRANS
+ $ NOTRANS
INTEGER M1, M2, N1, N2, K, INFO, I, J
* ..
* .. External Functions ..
@@ -253,7 +253,7 @@
ELSE IF( .NOT.NOTRANS .AND. .NOT.LSAME( TRANS, 'T' ) ) THEN
INFO = -4
ELSE IF( .NOT.LSAME( DIAG, 'N' ) .AND. .NOT.LSAME( DIAG, 'U' ) )
- + THEN
+ $ THEN
INFO = -5
ELSE IF( M.LT.0 ) THEN
INFO = -6
@@ -270,7 +270,7 @@
* Quick return when ( (N.EQ.0).OR.(M.EQ.0) )
*
IF( ( M.EQ.0 ) .OR. ( N.EQ.0 ) )
- + RETURN
+ $ RETURN
*
* Quick return when ALPHA.EQ.(0D+0)
*
@@ -324,14 +324,14 @@
*
IF( M.EQ.1 ) THEN
CALL STRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A, M, B, LDB )
+ $ A, M, B, LDB )
ELSE
CALL STRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
CALL SGEMM( 'N', 'N', M2, N, M1, -ONE, A( M1 ),
- + M, B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ M, B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL STRSM( 'L', 'U', 'T', DIAG, M2, N, ONE,
- + A( M ), M, B( M1, 0 ), LDB )
+ $ A( M ), M, B( M1, 0 ), LDB )
END IF
*
ELSE
@@ -341,14 +341,14 @@
*
IF( M.EQ.1 ) THEN
CALL STRSM( 'L', 'L', 'T', DIAG, M1, N, ALPHA,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
ELSE
CALL STRSM( 'L', 'U', 'N', DIAG, M2, N, ALPHA,
- + A( M ), M, B( M1, 0 ), LDB )
+ $ A( M ), M, B( M1, 0 ), LDB )
CALL SGEMM( 'T', 'N', M1, N, M2, -ONE, A( M1 ),
- + M, B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ M, B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL STRSM( 'L', 'L', 'T', DIAG, M1, N, ONE,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
END IF
*
END IF
@@ -363,11 +363,11 @@
* TRANS = 'N'
*
CALL STRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A( M2 ), M, B, LDB )
+ $ A( M2 ), M, B, LDB )
CALL SGEMM( 'T', 'N', M2, N, M1, -ONE, A( 0 ), M,
- + B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL STRSM( 'L', 'U', 'T', DIAG, M2, N, ONE,
- + A( M1 ), M, B( M1, 0 ), LDB )
+ $ A( M1 ), M, B( M1, 0 ), LDB )
*
ELSE
*
@@ -375,11 +375,11 @@
* TRANS = 'T'
*
CALL STRSM( 'L', 'U', 'N', DIAG, M2, N, ALPHA,
- + A( M1 ), M, B( M1, 0 ), LDB )
+ $ A( M1 ), M, B( M1, 0 ), LDB )
CALL SGEMM( 'N', 'N', M1, N, M2, -ONE, A( 0 ), M,
- + B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL STRSM( 'L', 'L', 'T', DIAG, M1, N, ONE,
- + A( M2 ), M, B, LDB )
+ $ A( M2 ), M, B, LDB )
*
END IF
*
@@ -400,15 +400,15 @@
*
IF( M.EQ.1 ) THEN
CALL STRSM( 'L', 'U', 'T', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
ELSE
CALL STRSM( 'L', 'U', 'T', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
CALL SGEMM( 'T', 'N', M2, N, M1, -ONE,
- + A( M1*M1 ), M1, B, LDB, ALPHA,
- + B( M1, 0 ), LDB )
+ $ A( M1*M1 ), M1, B, LDB, ALPHA,
+ $ B( M1, 0 ), LDB )
CALL STRSM( 'L', 'L', 'N', DIAG, M2, N, ONE,
- + A( 1 ), M1, B( M1, 0 ), LDB )
+ $ A( 1 ), M1, B( M1, 0 ), LDB )
END IF
*
ELSE
@@ -418,15 +418,15 @@
*
IF( M.EQ.1 ) THEN
CALL STRSM( 'L', 'U', 'N', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
ELSE
CALL STRSM( 'L', 'L', 'T', DIAG, M2, N, ALPHA,
- + A( 1 ), M1, B( M1, 0 ), LDB )
+ $ A( 1 ), M1, B( M1, 0 ), LDB )
CALL SGEMM( 'N', 'N', M1, N, M2, -ONE,
- + A( M1*M1 ), M1, B( M1, 0 ), LDB,
- + ALPHA, B, LDB )
+ $ A( M1*M1 ), M1, B( M1, 0 ), LDB,
+ $ ALPHA, B, LDB )
CALL STRSM( 'L', 'U', 'N', DIAG, M1, N, ONE,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
END IF
*
END IF
@@ -441,11 +441,11 @@
* TRANS = 'N'
*
CALL STRSM( 'L', 'U', 'T', DIAG, M1, N, ALPHA,
- + A( M2*M2 ), M2, B, LDB )
+ $ A( M2*M2 ), M2, B, LDB )
CALL SGEMM( 'N', 'N', M2, N, M1, -ONE, A( 0 ), M2,
- + B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL STRSM( 'L', 'L', 'N', DIAG, M2, N, ONE,
- + A( M1*M2 ), M2, B( M1, 0 ), LDB )
+ $ A( M1*M2 ), M2, B( M1, 0 ), LDB )
*
ELSE
*
@@ -453,11 +453,11 @@
* TRANS = 'T'
*
CALL STRSM( 'L', 'L', 'T', DIAG, M2, N, ALPHA,
- + A( M1*M2 ), M2, B( M1, 0 ), LDB )
+ $ A( M1*M2 ), M2, B( M1, 0 ), LDB )
CALL SGEMM( 'T', 'N', M1, N, M2, -ONE, A( 0 ), M2,
- + B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL STRSM( 'L', 'U', 'N', DIAG, M1, N, ONE,
- + A( M2*M2 ), M2, B, LDB )
+ $ A( M2*M2 ), M2, B, LDB )
*
END IF
*
@@ -483,11 +483,11 @@
* and TRANS = 'N'
*
CALL STRSM( 'L', 'L', 'N', DIAG, K, N, ALPHA,
- + A( 1 ), M+1, B, LDB )
+ $ A( 1 ), M+1, B, LDB )
CALL SGEMM( 'N', 'N', K, N, K, -ONE, A( K+1 ),
- + M+1, B, LDB, ALPHA, B( K, 0 ), LDB )
+ $ M+1, B, LDB, ALPHA, B( K, 0 ), LDB )
CALL STRSM( 'L', 'U', 'T', DIAG, K, N, ONE,
- + A( 0 ), M+1, B( K, 0 ), LDB )
+ $ A( 0 ), M+1, B( K, 0 ), LDB )
*
ELSE
*
@@ -495,11 +495,11 @@
* and TRANS = 'T'
*
CALL STRSM( 'L', 'U', 'N', DIAG, K, N, ALPHA,
- + A( 0 ), M+1, B( K, 0 ), LDB )
+ $ A( 0 ), M+1, B( K, 0 ), LDB )
CALL SGEMM( 'T', 'N', K, N, K, -ONE, A( K+1 ),
- + M+1, B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ M+1, B( K, 0 ), LDB, ALPHA, B, LDB )
CALL STRSM( 'L', 'L', 'T', DIAG, K, N, ONE,
- + A( 1 ), M+1, B, LDB )
+ $ A( 1 ), M+1, B, LDB )
*
END IF
*
@@ -513,22 +513,22 @@
* and TRANS = 'N'
*
CALL STRSM( 'L', 'L', 'N', DIAG, K, N, ALPHA,
- + A( K+1 ), M+1, B, LDB )
+ $ A( K+1 ), M+1, B, LDB )
CALL SGEMM( 'T', 'N', K, N, K, -ONE, A( 0 ), M+1,
- + B, LDB, ALPHA, B( K, 0 ), LDB )
+ $ B, LDB, ALPHA, B( K, 0 ), LDB )
CALL STRSM( 'L', 'U', 'T', DIAG, K, N, ONE,
- + A( K ), M+1, B( K, 0 ), LDB )
+ $ A( K ), M+1, B( K, 0 ), LDB )
*
ELSE
*
* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U',
* and TRANS = 'T'
CALL STRSM( 'L', 'U', 'N', DIAG, K, N, ALPHA,
- + A( K ), M+1, B( K, 0 ), LDB )
+ $ A( K ), M+1, B( K, 0 ), LDB )
CALL SGEMM( 'N', 'N', K, N, K, -ONE, A( 0 ), M+1,
- + B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ B( K, 0 ), LDB, ALPHA, B, LDB )
CALL STRSM( 'L', 'L', 'T', DIAG, K, N, ONE,
- + A( K+1 ), M+1, B, LDB )
+ $ A( K+1 ), M+1, B, LDB )
*
END IF
*
@@ -548,12 +548,12 @@
* and TRANS = 'N'
*
CALL STRSM( 'L', 'U', 'T', DIAG, K, N, ALPHA,
- + A( K ), K, B, LDB )
+ $ A( K ), K, B, LDB )
CALL SGEMM( 'T', 'N', K, N, K, -ONE,
- + A( K*( K+1 ) ), K, B, LDB, ALPHA,
- + B( K, 0 ), LDB )
+ $ A( K*( K+1 ) ), K, B, LDB, ALPHA,
+ $ B( K, 0 ), LDB )
CALL STRSM( 'L', 'L', 'N', DIAG, K, N, ONE,
- + A( 0 ), K, B( K, 0 ), LDB )
+ $ A( 0 ), K, B( K, 0 ), LDB )
*
ELSE
*
@@ -561,12 +561,12 @@
* and TRANS = 'T'
*
CALL STRSM( 'L', 'L', 'T', DIAG, K, N, ALPHA,
- + A( 0 ), K, B( K, 0 ), LDB )
+ $ A( 0 ), K, B( K, 0 ), LDB )
CALL SGEMM( 'N', 'N', K, N, K, -ONE,
- + A( K*( K+1 ) ), K, B( K, 0 ), LDB,
- + ALPHA, B, LDB )
+ $ A( K*( K+1 ) ), K, B( K, 0 ), LDB,
+ $ ALPHA, B, LDB )
CALL STRSM( 'L', 'U', 'N', DIAG, K, N, ONE,
- + A( K ), K, B, LDB )
+ $ A( K ), K, B, LDB )
*
END IF
*
@@ -580,11 +580,11 @@
* and TRANS = 'N'
*
CALL STRSM( 'L', 'U', 'T', DIAG, K, N, ALPHA,
- + A( K*( K+1 ) ), K, B, LDB )
+ $ A( K*( K+1 ) ), K, B, LDB )
CALL SGEMM( 'N', 'N', K, N, K, -ONE, A( 0 ), K, B,
- + LDB, ALPHA, B( K, 0 ), LDB )
+ $ LDB, ALPHA, B( K, 0 ), LDB )
CALL STRSM( 'L', 'L', 'N', DIAG, K, N, ONE,
- + A( K*K ), K, B( K, 0 ), LDB )
+ $ A( K*K ), K, B( K, 0 ), LDB )
*
ELSE
*
@@ -592,11 +592,11 @@
* and TRANS = 'T'
*
CALL STRSM( 'L', 'L', 'T', DIAG, K, N, ALPHA,
- + A( K*K ), K, B( K, 0 ), LDB )
+ $ A( K*K ), K, B( K, 0 ), LDB )
CALL SGEMM( 'T', 'N', K, N, K, -ONE, A( 0 ), K,
- + B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ B( K, 0 ), LDB, ALPHA, B, LDB )
CALL STRSM( 'L', 'U', 'N', DIAG, K, N, ONE,
- + A( K*( K+1 ) ), K, B, LDB )
+ $ A( K*( K+1 ) ), K, B, LDB )
*
END IF
*
@@ -646,12 +646,12 @@
* TRANS = 'N'
*
CALL STRSM( 'R', 'U', 'T', DIAG, M, N2, ALPHA,
- + A( N ), N, B( 0, N1 ), LDB )
+ $ A( N ), N, B( 0, N1 ), LDB )
CALL SGEMM( 'N', 'N', M, N1, N2, -ONE, B( 0, N1 ),
- + LDB, A( N1 ), N, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( N1 ), N, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL STRSM( 'R', 'L', 'N', DIAG, M, N1, ONE,
- + A( 0 ), N, B( 0, 0 ), LDB )
+ $ A( 0 ), N, B( 0, 0 ), LDB )
*
ELSE
*
@@ -659,12 +659,12 @@
* TRANS = 'T'
*
CALL STRSM( 'R', 'L', 'T', DIAG, M, N1, ALPHA,
- + A( 0 ), N, B( 0, 0 ), LDB )
+ $ A( 0 ), N, B( 0, 0 ), LDB )
CALL SGEMM( 'N', 'T', M, N2, N1, -ONE, B( 0, 0 ),
- + LDB, A( N1 ), N, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( N1 ), N, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL STRSM( 'R', 'U', 'N', DIAG, M, N2, ONE,
- + A( N ), N, B( 0, N1 ), LDB )
+ $ A( N ), N, B( 0, N1 ), LDB )
*
END IF
*
@@ -678,12 +678,12 @@
* TRANS = 'N'
*
CALL STRSM( 'R', 'L', 'T', DIAG, M, N1, ALPHA,
- + A( N2 ), N, B( 0, 0 ), LDB )
+ $ A( N2 ), N, B( 0, 0 ), LDB )
CALL SGEMM( 'N', 'N', M, N2, N1, -ONE, B( 0, 0 ),
- + LDB, A( 0 ), N, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( 0 ), N, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL STRSM( 'R', 'U', 'N', DIAG, M, N2, ONE,
- + A( N1 ), N, B( 0, N1 ), LDB )
+ $ A( N1 ), N, B( 0, N1 ), LDB )
*
ELSE
*
@@ -691,11 +691,11 @@
* TRANS = 'T'
*
CALL STRSM( 'R', 'U', 'T', DIAG, M, N2, ALPHA,
- + A( N1 ), N, B( 0, N1 ), LDB )
+ $ A( N1 ), N, B( 0, N1 ), LDB )
CALL SGEMM( 'N', 'T', M, N1, N2, -ONE, B( 0, N1 ),
- + LDB, A( 0 ), N, ALPHA, B( 0, 0 ), LDB )
+ $ LDB, A( 0 ), N, ALPHA, B( 0, 0 ), LDB )
CALL STRSM( 'R', 'L', 'N', DIAG, M, N1, ONE,
- + A( N2 ), N, B( 0, 0 ), LDB )
+ $ A( N2 ), N, B( 0, 0 ), LDB )
*
END IF
*
@@ -715,12 +715,12 @@
* TRANS = 'N'
*
CALL STRSM( 'R', 'L', 'N', DIAG, M, N2, ALPHA,
- + A( 1 ), N1, B( 0, N1 ), LDB )
+ $ A( 1 ), N1, B( 0, N1 ), LDB )
CALL SGEMM( 'N', 'T', M, N1, N2, -ONE, B( 0, N1 ),
- + LDB, A( N1*N1 ), N1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( N1*N1 ), N1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL STRSM( 'R', 'U', 'T', DIAG, M, N1, ONE,
- + A( 0 ), N1, B( 0, 0 ), LDB )
+ $ A( 0 ), N1, B( 0, 0 ), LDB )
*
ELSE
*
@@ -728,12 +728,12 @@
* TRANS = 'T'
*
CALL STRSM( 'R', 'U', 'N', DIAG, M, N1, ALPHA,
- + A( 0 ), N1, B( 0, 0 ), LDB )
+ $ A( 0 ), N1, B( 0, 0 ), LDB )
CALL SGEMM( 'N', 'N', M, N2, N1, -ONE, B( 0, 0 ),
- + LDB, A( N1*N1 ), N1, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( N1*N1 ), N1, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL STRSM( 'R', 'L', 'T', DIAG, M, N2, ONE,
- + A( 1 ), N1, B( 0, N1 ), LDB )
+ $ A( 1 ), N1, B( 0, N1 ), LDB )
*
END IF
*
@@ -747,12 +747,12 @@
* TRANS = 'N'
*
CALL STRSM( 'R', 'U', 'N', DIAG, M, N1, ALPHA,
- + A( N2*N2 ), N2, B( 0, 0 ), LDB )
+ $ A( N2*N2 ), N2, B( 0, 0 ), LDB )
CALL SGEMM( 'N', 'T', M, N2, N1, -ONE, B( 0, 0 ),
- + LDB, A( 0 ), N2, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( 0 ), N2, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL STRSM( 'R', 'L', 'T', DIAG, M, N2, ONE,
- + A( N1*N2 ), N2, B( 0, N1 ), LDB )
+ $ A( N1*N2 ), N2, B( 0, N1 ), LDB )
*
ELSE
*
@@ -760,12 +760,12 @@
* TRANS = 'T'
*
CALL STRSM( 'R', 'L', 'N', DIAG, M, N2, ALPHA,
- + A( N1*N2 ), N2, B( 0, N1 ), LDB )
+ $ A( N1*N2 ), N2, B( 0, N1 ), LDB )
CALL SGEMM( 'N', 'N', M, N1, N2, -ONE, B( 0, N1 ),
- + LDB, A( 0 ), N2, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( 0 ), N2, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL STRSM( 'R', 'U', 'T', DIAG, M, N1, ONE,
- + A( N2*N2 ), N2, B( 0, 0 ), LDB )
+ $ A( N2*N2 ), N2, B( 0, 0 ), LDB )
*
END IF
*
@@ -791,12 +791,12 @@
* and TRANS = 'N'
*
CALL STRSM( 'R', 'U', 'T', DIAG, M, K, ALPHA,
- + A( 0 ), N+1, B( 0, K ), LDB )
+ $ A( 0 ), N+1, B( 0, K ), LDB )
CALL SGEMM( 'N', 'N', M, K, K, -ONE, B( 0, K ),
- + LDB, A( K+1 ), N+1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( K+1 ), N+1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL STRSM( 'R', 'L', 'N', DIAG, M, K, ONE,
- + A( 1 ), N+1, B( 0, 0 ), LDB )
+ $ A( 1 ), N+1, B( 0, 0 ), LDB )
*
ELSE
*
@@ -804,12 +804,12 @@
* and TRANS = 'T'
*
CALL STRSM( 'R', 'L', 'T', DIAG, M, K, ALPHA,
- + A( 1 ), N+1, B( 0, 0 ), LDB )
+ $ A( 1 ), N+1, B( 0, 0 ), LDB )
CALL SGEMM( 'N', 'T', M, K, K, -ONE, B( 0, 0 ),
- + LDB, A( K+1 ), N+1, ALPHA, B( 0, K ),
- + LDB )
+ $ LDB, A( K+1 ), N+1, ALPHA, B( 0, K ),
+ $ LDB )
CALL STRSM( 'R', 'U', 'N', DIAG, M, K, ONE,
- + A( 0 ), N+1, B( 0, K ), LDB )
+ $ A( 0 ), N+1, B( 0, K ), LDB )
*
END IF
*
@@ -823,12 +823,12 @@
* and TRANS = 'N'
*
CALL STRSM( 'R', 'L', 'T', DIAG, M, K, ALPHA,
- + A( K+1 ), N+1, B( 0, 0 ), LDB )
+ $ A( K+1 ), N+1, B( 0, 0 ), LDB )
CALL SGEMM( 'N', 'N', M, K, K, -ONE, B( 0, 0 ),
- + LDB, A( 0 ), N+1, ALPHA, B( 0, K ),
- + LDB )
+ $ LDB, A( 0 ), N+1, ALPHA, B( 0, K ),
+ $ LDB )
CALL STRSM( 'R', 'U', 'N', DIAG, M, K, ONE,
- + A( K ), N+1, B( 0, K ), LDB )
+ $ A( K ), N+1, B( 0, K ), LDB )
*
ELSE
*
@@ -836,12 +836,12 @@
* and TRANS = 'T'
*
CALL STRSM( 'R', 'U', 'T', DIAG, M, K, ALPHA,
- + A( K ), N+1, B( 0, K ), LDB )
+ $ A( K ), N+1, B( 0, K ), LDB )
CALL SGEMM( 'N', 'T', M, K, K, -ONE, B( 0, K ),
- + LDB, A( 0 ), N+1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( 0 ), N+1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL STRSM( 'R', 'L', 'N', DIAG, M, K, ONE,
- + A( K+1 ), N+1, B( 0, 0 ), LDB )
+ $ A( K+1 ), N+1, B( 0, 0 ), LDB )
*
END IF
*
@@ -861,12 +861,12 @@
* and TRANS = 'N'
*
CALL STRSM( 'R', 'L', 'N', DIAG, M, K, ALPHA,
- + A( 0 ), K, B( 0, K ), LDB )
+ $ A( 0 ), K, B( 0, K ), LDB )
CALL SGEMM( 'N', 'T', M, K, K, -ONE, B( 0, K ),
- + LDB, A( ( K+1 )*K ), K, ALPHA,
- + B( 0, 0 ), LDB )
+ $ LDB, A( ( K+1 )*K ), K, ALPHA,
+ $ B( 0, 0 ), LDB )
CALL STRSM( 'R', 'U', 'T', DIAG, M, K, ONE,
- + A( K ), K, B( 0, 0 ), LDB )
+ $ A( K ), K, B( 0, 0 ), LDB )
*
ELSE
*
@@ -874,12 +874,12 @@
* and TRANS = 'T'
*
CALL STRSM( 'R', 'U', 'N', DIAG, M, K, ALPHA,
- + A( K ), K, B( 0, 0 ), LDB )
+ $ A( K ), K, B( 0, 0 ), LDB )
CALL SGEMM( 'N', 'N', M, K, K, -ONE, B( 0, 0 ),
- + LDB, A( ( K+1 )*K ), K, ALPHA,
- + B( 0, K ), LDB )
+ $ LDB, A( ( K+1 )*K ), K, ALPHA,
+ $ B( 0, K ), LDB )
CALL STRSM( 'R', 'L', 'T', DIAG, M, K, ONE,
- + A( 0 ), K, B( 0, K ), LDB )
+ $ A( 0 ), K, B( 0, K ), LDB )
*
END IF
*
@@ -893,11 +893,11 @@
* and TRANS = 'N'
*
CALL STRSM( 'R', 'U', 'N', DIAG, M, K, ALPHA,
- + A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
+ $ A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
CALL SGEMM( 'N', 'T', M, K, K, -ONE, B( 0, 0 ),
- + LDB, A( 0 ), K, ALPHA, B( 0, K ), LDB )
+ $ LDB, A( 0 ), K, ALPHA, B( 0, K ), LDB )
CALL STRSM( 'R', 'L', 'T', DIAG, M, K, ONE,
- + A( K*K ), K, B( 0, K ), LDB )
+ $ A( K*K ), K, B( 0, K ), LDB )
*
ELSE
*
@@ -905,11 +905,11 @@
* and TRANS = 'T'
*
CALL STRSM( 'R', 'L', 'N', DIAG, M, K, ALPHA,
- + A( K*K ), K, B( 0, K ), LDB )
+ $ A( K*K ), K, B( 0, K ), LDB )
CALL SGEMM( 'N', 'N', M, K, K, -ONE, B( 0, K ),
- + LDB, A( 0 ), K, ALPHA, B( 0, 0 ), LDB )
+ $ LDB, A( 0 ), K, ALPHA, B( 0, 0 ), LDB )
CALL STRSM( 'R', 'U', 'T', DIAG, M, K, ONE,
- + A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
+ $ A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
*
END IF
*
diff --git a/SRC/stftri.f b/SRC/stftri.f
index 631d0fca..a5ceb60b 100644
--- a/SRC/stftri.f
+++ b/SRC/stftri.f
@@ -181,7 +181,7 @@
ELSE IF( .NOT.LOWER .AND. .NOT.LSAME( UPLO, 'U' ) ) THEN
INFO = -2
ELSE IF( .NOT.LSAME( DIAG, 'N' ) .AND. .NOT.LSAME( DIAG, 'U' ) )
- + THEN
+ $ THEN
INFO = -3
ELSE IF( N.LT.0 ) THEN
INFO = -4
@@ -194,7 +194,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -235,16 +235,16 @@
*
CALL STRTRI( 'L', DIAG, N1, A( 0 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'R', 'L', 'N', DIAG, N2, N1, -ONE, A( 0 ),
- + N, A( N1 ), N )
+ $ N, A( N1 ), N )
CALL STRTRI( 'U', DIAG, N2, A( N ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'L', 'U', 'T', DIAG, N2, N1, ONE, A( N ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
*
ELSE
*
@@ -254,16 +254,16 @@
*
CALL STRTRI( 'L', DIAG, N1, A( N2 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'L', 'L', 'T', DIAG, N1, N2, -ONE, A( N2 ),
- + N, A( 0 ), N )
+ $ N, A( 0 ), N )
CALL STRTRI( 'U', DIAG, N2, A( N1 ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'R', 'U', 'N', DIAG, N1, N2, ONE, A( N1 ),
- + N, A( 0 ), N )
+ $ N, A( 0 ), N )
*
END IF
*
@@ -278,16 +278,16 @@
*
CALL STRTRI( 'U', DIAG, N1, A( 0 ), N1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'L', 'U', 'N', DIAG, N1, N2, -ONE, A( 0 ),
- + N1, A( N1*N1 ), N1 )
+ $ N1, A( N1*N1 ), N1 )
CALL STRTRI( 'L', DIAG, N2, A( 1 ), N1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'R', 'L', 'T', DIAG, N1, N2, ONE, A( 1 ),
- + N1, A( N1*N1 ), N1 )
+ $ N1, A( N1*N1 ), N1 )
*
ELSE
*
@@ -296,16 +296,16 @@
*
CALL STRTRI( 'U', DIAG, N1, A( N2*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'R', 'U', 'T', DIAG, N2, N1, -ONE,
- + A( N2*N2 ), N2, A( 0 ), N2 )
+ $ A( N2*N2 ), N2, A( 0 ), N2 )
CALL STRTRI( 'L', DIAG, N2, A( N1*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'L', 'L', 'N', DIAG, N2, N1, ONE,
- + A( N1*N2 ), N2, A( 0 ), N2 )
+ $ A( N1*N2 ), N2, A( 0 ), N2 )
END IF
*
END IF
@@ -326,16 +326,16 @@
*
CALL STRTRI( 'L', DIAG, K, A( 1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'R', 'L', 'N', DIAG, K, K, -ONE, A( 1 ),
- + N+1, A( K+1 ), N+1 )
+ $ N+1, A( K+1 ), N+1 )
CALL STRTRI( 'U', DIAG, K, A( 0 ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'L', 'U', 'T', DIAG, K, K, ONE, A( 0 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
*
ELSE
*
@@ -345,16 +345,16 @@
*
CALL STRTRI( 'L', DIAG, K, A( K+1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'L', 'L', 'T', DIAG, K, K, -ONE, A( K+1 ),
- + N+1, A( 0 ), N+1 )
+ $ N+1, A( 0 ), N+1 )
CALL STRTRI( 'U', DIAG, K, A( K ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'R', 'U', 'N', DIAG, K, K, ONE, A( K ), N+1,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
END IF
ELSE
*
@@ -368,16 +368,16 @@
*
CALL STRTRI( 'U', DIAG, K, A( K ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'L', 'U', 'N', DIAG, K, K, -ONE, A( K ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL STRTRI( 'L', DIAG, K, A( 0 ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'R', 'L', 'T', DIAG, K, K, ONE, A( 0 ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
ELSE
*
* SRPA for UPPER, TRANSPOSE and N is even (see paper)
@@ -386,16 +386,16 @@
*
CALL STRTRI( 'U', DIAG, K, A( K*( K+1 ) ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'R', 'U', 'T', DIAG, K, K, -ONE,
- + A( K*( K+1 ) ), K, A( 0 ), K )
+ $ A( K*( K+1 ) ), K, A( 0 ), K )
CALL STRTRI( 'L', DIAG, K, A( K*K ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL STRMM( 'L', 'L', 'N', DIAG, K, K, ONE, A( K*K ), K,
- + A( 0 ), K )
+ $ A( 0 ), K )
END IF
END IF
END IF
diff --git a/SRC/stfttp.f b/SRC/stfttp.f
index 52bff33d..157d5c03 100644
--- a/SRC/stfttp.f
+++ b/SRC/stfttp.f
@@ -175,7 +175,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
IF( N.EQ.1 ) THEN
IF( NORMALTRANSR ) THEN
@@ -218,7 +218,7 @@
* ARF^C has lda rows and n+1-noe cols
*
IF( .NOT.NORMALTRANSR )
- + LDA = ( N+1 ) / 2
+ $ LDA = ( N+1 ) / 2
*
* start execution: there are eight cases
*
diff --git a/SRC/stfttr.f b/SRC/stfttr.f
index 2fa02ff2..f5bf0894 100644
--- a/SRC/stfttr.f
+++ b/SRC/stfttr.f
@@ -214,11 +214,11 @@
K = N / 2
NISODD = .FALSE.
IF( .NOT.LOWER )
- + NP1X2 = N + N + 2
+ $ NP1X2 = N + N + 2
ELSE
NISODD = .TRUE.
IF( .NOT.LOWER )
- + NX2 = N + N
+ $ NX2 = N + N
END IF
*
IF( NISODD ) THEN
diff --git a/SRC/stpttf.f b/SRC/stpttf.f
index 42b9d777..c633bb97 100644
--- a/SRC/stpttf.f
+++ b/SRC/stpttf.f
@@ -177,7 +177,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
IF( N.EQ.1 ) THEN
IF( NORMALTRANSR ) THEN
@@ -220,7 +220,7 @@
* ARF^C has lda rows and n+1-noe cols
*
IF( .NOT.NORMALTRANSR )
- + LDA = ( N+1 ) / 2
+ $ LDA = ( N+1 ) / 2
*
* start execution: there are eight cases
*
diff --git a/SRC/strttf.f b/SRC/strttf.f
index 2e1aab77..6c405f56 100644
--- a/SRC/strttf.f
+++ b/SRC/strttf.f
@@ -211,11 +211,11 @@
K = N / 2
NISODD = .FALSE.
IF( .NOT.LOWER )
- + NP1X2 = N + N + 2
+ $ NP1X2 = N + N + 2
ELSE
NISODD = .TRUE.
IF( .NOT.LOWER )
- + NX2 = N + N
+ $ NX2 = N + N
END IF
*
IF( NISODD ) THEN
diff --git a/SRC/zcgesv.f b/SRC/zcgesv.f
index ebf98aae..71611f90 100644
--- a/SRC/zcgesv.f
+++ b/SRC/zcgesv.f
@@ -1,5 +1,5 @@
SUBROUTINE ZCGESV( N, NRHS, A, LDA, IPIV, B, LDB, X, LDX, WORK,
- + SWORK, RWORK, ITER, INFO )
+ $ SWORK, RWORK, ITER, INFO )
*
* -- LAPACK PROTOTYPE driver routine (version 3.2.2) --
* -- LAPACK is a software package provided by Univ. of Tennessee, --
@@ -15,7 +15,7 @@
DOUBLE PRECISION RWORK( * )
COMPLEX SWORK( * )
COMPLEX*16 A( LDA, * ), B( LDB, * ), WORK( N, * ),
- + X( LDX, * )
+ $ X( LDX, * )
* ..
*
* Purpose
@@ -125,7 +125,7 @@
* factor U is exactly singular, so the solution
* could not be computed.
*
-* =========
+* =====================================================================
*
* .. Parameters ..
LOGICAL DOITREF
@@ -139,7 +139,7 @@
*
COMPLEX*16 NEGONE, ONE
PARAMETER ( NEGONE = ( -1.0D+00, 0.0D+00 ),
- + ONE = ( 1.0D+00, 0.0D+00 ) )
+ $ ONE = ( 1.0D+00, 0.0D+00 ) )
*
* .. Local Scalars ..
INTEGER I, IITER, PTSA, PTSX
@@ -148,7 +148,7 @@
*
* .. External Subroutines ..
EXTERNAL CGETRS, CGETRF, CLAG2Z, XERBLA, ZAXPY, ZGEMM,
- + ZLACPY, ZLAG2C
+ $ ZLACPY, ZLAG2C
* ..
* .. External Functions ..
INTEGER IZAMAX
@@ -190,7 +190,7 @@
* Quick return if (N.EQ.0).
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* Skip single precision iterative refinement if a priori slower
* than double precision factorization.
@@ -243,7 +243,7 @@
* Solve the system SA*SX = SB.
*
CALL CGETRS( 'No transpose', N, NRHS, SWORK( PTSA ), N, IPIV,
- + SWORK( PTSX ), N, INFO )
+ $ SWORK( PTSX ), N, INFO )
*
* Convert SX back to double precision
*
@@ -254,7 +254,7 @@
CALL ZLACPY( 'All', N, NRHS, B, LDB, WORK, N )
*
CALL ZGEMM( 'No Transpose', 'No Transpose', N, NRHS, N, NEGONE, A,
- + LDA, X, LDX, ONE, WORK, N )
+ $ LDA, X, LDX, ONE, WORK, N )
*
* Check whether the NRHS normwise backward errors satisfy the
* stopping criterion. If yes, set ITER=0 and return.
@@ -263,7 +263,7 @@
XNRM = CABS1( X( IZAMAX( N, X( 1, I ), 1 ), I ) )
RNRM = CABS1( WORK( IZAMAX( N, WORK( 1, I ), 1 ), I ) )
IF( RNRM.GT.XNRM*CTE )
- + GO TO 10
+ $ GO TO 10
END DO
*
* If we are here, the NRHS normwise backward errors satisfy the
@@ -289,7 +289,7 @@
* Solve the system SA*SX = SR.
*
CALL CGETRS( 'No transpose', N, NRHS, SWORK( PTSA ), N, IPIV,
- + SWORK( PTSX ), N, INFO )
+ $ SWORK( PTSX ), N, INFO )
*
* Convert SX back to double precision and update the current
* iterate.
@@ -305,7 +305,7 @@
CALL ZLACPY( 'All', N, NRHS, B, LDB, WORK, N )
*
CALL ZGEMM( 'No Transpose', 'No Transpose', N, NRHS, N, NEGONE,
- + A, LDA, X, LDX, ONE, WORK, N )
+ $ A, LDA, X, LDX, ONE, WORK, N )
*
* Check whether the NRHS normwise backward errors satisfy the
* stopping criterion. If yes, set ITER=IITER>0 and return.
@@ -314,7 +314,7 @@
XNRM = CABS1( X( IZAMAX( N, X( 1, I ), 1 ), I ) )
RNRM = CABS1( WORK( IZAMAX( N, WORK( 1, I ), 1 ), I ) )
IF( RNRM.GT.XNRM*CTE )
- + GO TO 20
+ $ GO TO 20
END DO
*
* If we are here, the NRHS normwise backward errors satisfy the
@@ -343,11 +343,11 @@
CALL ZGETRF( N, N, A, LDA, IPIV, INFO )
*
IF( INFO.NE.0 )
- + RETURN
+ $ RETURN
*
CALL ZLACPY( 'All', N, NRHS, B, LDB, X, LDX )
CALL ZGETRS( 'No transpose', N, NRHS, A, LDA, IPIV, X, LDX,
- + INFO )
+ $ INFO )
*
RETURN
*
diff --git a/SRC/zcposv.f b/SRC/zcposv.f
index 0ba33ec1..7350826b 100644
--- a/SRC/zcposv.f
+++ b/SRC/zcposv.f
@@ -1,5 +1,5 @@
SUBROUTINE ZCPOSV( UPLO, N, NRHS, A, LDA, B, LDB, X, LDX, WORK,
- + SWORK, RWORK, ITER, INFO )
+ $ SWORK, RWORK, ITER, INFO )
*
* -- LAPACK PROTOTYPE driver routine (version 3.3.0) --
*
@@ -16,7 +16,7 @@
DOUBLE PRECISION RWORK( * )
COMPLEX SWORK( * )
COMPLEX*16 A( LDA, * ), B( LDB, * ), WORK( N, * ),
- + X( LDX, * )
+ $ X( LDX, * )
* ..
*
* Purpose
@@ -134,7 +134,7 @@
* factorization could not be completed, and the solution
* has not been computed.
*
-* =========
+* =====================================================================
*
* .. Parameters ..
LOGICAL DOITREF
@@ -148,7 +148,7 @@
*
COMPLEX*16 NEGONE, ONE
PARAMETER ( NEGONE = ( -1.0D+00, 0.0D+00 ),
- + ONE = ( 1.0D+00, 0.0D+00 ) )
+ $ ONE = ( 1.0D+00, 0.0D+00 ) )
*
* .. Local Scalars ..
INTEGER I, IITER, PTSA, PTSX
@@ -157,7 +157,7 @@
*
* .. External Subroutines ..
EXTERNAL ZAXPY, ZHEMM, ZLACPY, ZLAT2C, ZLAG2C, CLAG2Z,
- + CPOTRF, CPOTRS, XERBLA
+ $ CPOTRF, CPOTRS, XERBLA
* ..
* .. External Functions ..
INTEGER IZAMAX
@@ -201,7 +201,7 @@
* Quick return if (N.EQ.0).
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* Skip single precision iterative refinement if a priori slower
* than double precision factorization.
@@ -254,7 +254,7 @@
* Solve the system SA*SX = SB.
*
CALL CPOTRS( UPLO, N, NRHS, SWORK( PTSA ), N, SWORK( PTSX ), N,
- + INFO )
+ $ INFO )
*
* Convert SX back to COMPLEX*16
*
@@ -265,7 +265,7 @@
CALL ZLACPY( 'All', N, NRHS, B, LDB, WORK, N )
*
CALL ZHEMM( 'Left', UPLO, N, NRHS, NEGONE, A, LDA, X, LDX, ONE,
- + WORK, N )
+ $ WORK, N )
*
* Check whether the NRHS normwise backward errors satisfy the
* stopping criterion. If yes, set ITER=0 and return.
@@ -274,7 +274,7 @@
XNRM = CABS1( X( IZAMAX( N, X( 1, I ), 1 ), I ) )
RNRM = CABS1( WORK( IZAMAX( N, WORK( 1, I ), 1 ), I ) )
IF( RNRM.GT.XNRM*CTE )
- + GO TO 10
+ $ GO TO 10
END DO
*
* If we are here, the NRHS normwise backward errors satisfy the
@@ -300,7 +300,7 @@
* Solve the system SA*SX = SR.
*
CALL CPOTRS( UPLO, N, NRHS, SWORK( PTSA ), N, SWORK( PTSX ), N,
- + INFO )
+ $ INFO )
*
* Convert SX back to double precision and update the current
* iterate.
@@ -316,7 +316,7 @@
CALL ZLACPY( 'All', N, NRHS, B, LDB, WORK, N )
*
CALL ZHEMM( 'L', UPLO, N, NRHS, NEGONE, A, LDA, X, LDX, ONE,
- + WORK, N )
+ $ WORK, N )
*
* Check whether the NRHS normwise backward errors satisfy the
* stopping criterion. If yes, set ITER=IITER>0 and return.
@@ -325,7 +325,7 @@
XNRM = CABS1( X( IZAMAX( N, X( 1, I ), 1 ), I ) )
RNRM = CABS1( WORK( IZAMAX( N, WORK( 1, I ), 1 ), I ) )
IF( RNRM.GT.XNRM*CTE )
- + GO TO 20
+ $ GO TO 20
END DO
*
* If we are here, the NRHS normwise backward errors satisfy the
@@ -354,7 +354,7 @@
CALL ZPOTRF( UPLO, N, A, LDA, INFO )
*
IF( INFO.NE.0 )
- + RETURN
+ $ RETURN
*
CALL ZLACPY( 'All', N, NRHS, B, LDB, X, LDX )
CALL ZPOTRS( UPLO, N, NRHS, A, LDA, X, LDX, INFO )
diff --git a/SRC/zheevr.f b/SRC/zheevr.f
index a127bf14..e97ef0fa 100644
--- a/SRC/zheevr.f
+++ b/SRC/zheevr.f
@@ -92,8 +92,8 @@
* = 'V': all eigenvalues in the half-open interval (VL,VU]
* will be found.
* = 'I': the IL-th through IU-th eigenvalues will be found.
-********** For RANGE = 'V' or 'I' and IU - IL < N - 1, DSTEBZ and
-********** ZSTEIN are called
+* For RANGE = 'V' or 'I' and IU - IL < N - 1, DSTEBZ and
+* ZSTEIN are called
*
* UPLO (input) CHARACTER*1
* = 'U': Upper triangle of A is stored;
@@ -183,7 +183,7 @@
* indicating the nonzero elements in Z. The i-th eigenvector
* is nonzero only in elements ISUPPZ( 2*i-1 ) through
* ISUPPZ( 2*i ).
-********** Implemented only for RANGE = 'A' or 'I' and IU - IL = N - 1
+* Implemented only for RANGE = 'A' or 'I' and IU - IL = N - 1
*
* WORK (workspace/output) COMPLEX*16 array, dimension (MAX(1,LWORK))
* On exit, if INFO = 0, WORK(1) returns the optimal LWORK.
@@ -204,7 +204,7 @@
* On exit, if INFO = 0, RWORK(1) returns the optimal
* (and minimal) LRWORK.
*
-* LRWORK (input) INTEGER
+* LRWORK (input) INTEGER
* The length of the array RWORK. LRWORK >= max(1,24*N).
*
* If LRWORK = -1, then a workspace query is assumed; the
@@ -217,7 +217,7 @@
* On exit, if INFO = 0, IWORK(1) returns the optimal
* (and minimal) LIWORK.
*
-* LIWORK (input) INTEGER
+* LIWORK (input) INTEGER
* The dimension of the array IWORK. LIWORK >= max(1,10*N).
*
* If LIWORK = -1, then a workspace query is assumed; the
@@ -242,7 +242,7 @@
* Jason Riedy, Computer Science Division, University of
* California at Berkeley, USA
*
-* =====================================================================
+* =====================================================================
*
* .. Parameters ..
DOUBLE PRECISION ZERO, ONE, TWO
diff --git a/SRC/zhfrk.f b/SRC/zhfrk.f
index 76f9a2d3..f85b75c5 100644
--- a/SRC/zhfrk.f
+++ b/SRC/zhfrk.f
@@ -1,5 +1,5 @@
SUBROUTINE ZHFRK( TRANSR, UPLO, TRANS, N, K, ALPHA, A, LDA, BETA,
- + C )
+ $ C )
*
* -- LAPACK routine (version 3.3.0) --
*
@@ -107,10 +107,8 @@
* parts of the diagonal elements need not be set, they are
* assumed to be zero, and on exit they are set to zero.
*
-* Arguments
-* ==========
+* =====================================================================
*
-* ..
* .. Parameters ..
DOUBLE PRECISION ONE, ZERO
COMPLEX*16 CZERO
@@ -172,7 +170,7 @@
* done (it is in ZHERK for example) and left in the general case.
*
IF( ( N.EQ.0 ) .OR. ( ( ( ALPHA.EQ.ZERO ) .OR. ( K.EQ.0 ) ) .AND.
- + ( BETA.EQ.ONE ) ) )RETURN
+ $ ( BETA.EQ.ONE ) ) )RETURN
*
IF( ( ALPHA.EQ.ZERO ) .AND. ( BETA.EQ.ZERO ) ) THEN
DO J = 1, ( ( N*( N+1 ) ) / 2 )
@@ -219,22 +217,22 @@
* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'N'
*
CALL ZHERK( 'L', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N )
+ $ BETA, C( 1 ), N )
CALL ZHERK( 'U', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( N+1 ), N )
+ $ BETA, C( N+1 ), N )
CALL ZGEMM( 'N', 'C', N2, N1, K, CALPHA, A( N1+1, 1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( N1+1 ), N )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( N1+1 ), N )
*
ELSE
*
* N is odd, TRANSR = 'N', UPLO = 'L', and TRANS = 'C'
*
CALL ZHERK( 'L', 'C', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N )
+ $ BETA, C( 1 ), N )
CALL ZHERK( 'U', 'C', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( N+1 ), N )
+ $ BETA, C( N+1 ), N )
CALL ZGEMM( 'C', 'N', N2, N1, K, CALPHA, A( 1, N1+1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( N1+1 ), N )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( N1+1 ), N )
*
END IF
*
@@ -247,22 +245,22 @@
* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'N'
*
CALL ZHERK( 'L', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2+1 ), N )
+ $ BETA, C( N2+1 ), N )
CALL ZHERK( 'U', 'N', N2, K, ALPHA, A( N2, 1 ), LDA,
- + BETA, C( N1+1 ), N )
+ $ BETA, C( N1+1 ), N )
CALL ZGEMM( 'N', 'C', N1, N2, K, CALPHA, A( 1, 1 ),
- + LDA, A( N2, 1 ), LDA, CBETA, C( 1 ), N )
+ $ LDA, A( N2, 1 ), LDA, CBETA, C( 1 ), N )
*
ELSE
*
* N is odd, TRANSR = 'N', UPLO = 'U', and TRANS = 'C'
*
CALL ZHERK( 'L', 'C', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2+1 ), N )
+ $ BETA, C( N2+1 ), N )
CALL ZHERK( 'U', 'C', N2, K, ALPHA, A( 1, N2 ), LDA,
- + BETA, C( N1+1 ), N )
+ $ BETA, C( N1+1 ), N )
CALL ZGEMM( 'C', 'N', N1, N2, K, CALPHA, A( 1, 1 ),
- + LDA, A( 1, N2 ), LDA, CBETA, C( 1 ), N )
+ $ LDA, A( 1, N2 ), LDA, CBETA, C( 1 ), N )
*
END IF
*
@@ -281,24 +279,24 @@
* N is odd, TRANSR = 'C', UPLO = 'L', and TRANS = 'N'
*
CALL ZHERK( 'U', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N1 )
+ $ BETA, C( 1 ), N1 )
CALL ZHERK( 'L', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( 2 ), N1 )
+ $ BETA, C( 2 ), N1 )
CALL ZGEMM( 'N', 'C', N1, N2, K, CALPHA, A( 1, 1 ),
- + LDA, A( N1+1, 1 ), LDA, CBETA,
- + C( N1*N1+1 ), N1 )
+ $ LDA, A( N1+1, 1 ), LDA, CBETA,
+ $ C( N1*N1+1 ), N1 )
*
ELSE
*
* N is odd, TRANSR = 'C', UPLO = 'L', and TRANS = 'C'
*
CALL ZHERK( 'U', 'C', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 1 ), N1 )
+ $ BETA, C( 1 ), N1 )
CALL ZHERK( 'L', 'C', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( 2 ), N1 )
+ $ BETA, C( 2 ), N1 )
CALL ZGEMM( 'C', 'N', N1, N2, K, CALPHA, A( 1, 1 ),
- + LDA, A( 1, N1+1 ), LDA, CBETA,
- + C( N1*N1+1 ), N1 )
+ $ LDA, A( 1, N1+1 ), LDA, CBETA,
+ $ C( N1*N1+1 ), N1 )
*
END IF
*
@@ -311,22 +309,22 @@
* N is odd, TRANSR = 'C', UPLO = 'U', and TRANS = 'N'
*
CALL ZHERK( 'U', 'N', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2*N2+1 ), N2 )
+ $ BETA, C( N2*N2+1 ), N2 )
CALL ZHERK( 'L', 'N', N2, K, ALPHA, A( N1+1, 1 ), LDA,
- + BETA, C( N1*N2+1 ), N2 )
+ $ BETA, C( N1*N2+1 ), N2 )
CALL ZGEMM( 'N', 'C', N2, N1, K, CALPHA, A( N1+1, 1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), N2 )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), N2 )
*
ELSE
*
* N is odd, TRANSR = 'C', UPLO = 'U', and TRANS = 'C'
*
CALL ZHERK( 'U', 'C', N1, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( N2*N2+1 ), N2 )
+ $ BETA, C( N2*N2+1 ), N2 )
CALL ZHERK( 'L', 'C', N2, K, ALPHA, A( 1, N1+1 ), LDA,
- + BETA, C( N1*N2+1 ), N2 )
+ $ BETA, C( N1*N2+1 ), N2 )
CALL ZGEMM( 'C', 'N', N2, N1, K, CALPHA, A( 1, N1+1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), N2 )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), N2 )
*
END IF
*
@@ -351,24 +349,24 @@
* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'N'
*
CALL ZHERK( 'L', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 2 ), N+1 )
+ $ BETA, C( 2 ), N+1 )
CALL ZHERK( 'U', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( 1 ), N+1 )
+ $ BETA, C( 1 ), N+1 )
CALL ZGEMM( 'N', 'C', NK, NK, K, CALPHA, A( NK+1, 1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( NK+2 ),
- + N+1 )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( NK+2 ),
+ $ N+1 )
*
ELSE
*
* N is even, TRANSR = 'N', UPLO = 'L', and TRANS = 'C'
*
CALL ZHERK( 'L', 'C', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( 2 ), N+1 )
+ $ BETA, C( 2 ), N+1 )
CALL ZHERK( 'U', 'C', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( 1 ), N+1 )
+ $ BETA, C( 1 ), N+1 )
CALL ZGEMM( 'C', 'N', NK, NK, K, CALPHA, A( 1, NK+1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( NK+2 ),
- + N+1 )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( NK+2 ),
+ $ N+1 )
*
END IF
*
@@ -381,24 +379,24 @@
* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'N'
*
CALL ZHERK( 'L', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+2 ), N+1 )
+ $ BETA, C( NK+2 ), N+1 )
CALL ZHERK( 'U', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( NK+1 ), N+1 )
+ $ BETA, C( NK+1 ), N+1 )
CALL ZGEMM( 'N', 'C', NK, NK, K, CALPHA, A( 1, 1 ),
- + LDA, A( NK+1, 1 ), LDA, CBETA, C( 1 ),
- + N+1 )
+ $ LDA, A( NK+1, 1 ), LDA, CBETA, C( 1 ),
+ $ N+1 )
*
ELSE
*
* N is even, TRANSR = 'N', UPLO = 'U', and TRANS = 'C'
*
CALL ZHERK( 'L', 'C', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+2 ), N+1 )
+ $ BETA, C( NK+2 ), N+1 )
CALL ZHERK( 'U', 'C', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( NK+1 ), N+1 )
+ $ BETA, C( NK+1 ), N+1 )
CALL ZGEMM( 'C', 'N', NK, NK, K, CALPHA, A( 1, 1 ),
- + LDA, A( 1, NK+1 ), LDA, CBETA, C( 1 ),
- + N+1 )
+ $ LDA, A( 1, NK+1 ), LDA, CBETA, C( 1 ),
+ $ N+1 )
*
END IF
*
@@ -417,24 +415,24 @@
* N is even, TRANSR = 'C', UPLO = 'L', and TRANS = 'N'
*
CALL ZHERK( 'U', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+1 ), NK )
+ $ BETA, C( NK+1 ), NK )
CALL ZHERK( 'L', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( 1 ), NK )
+ $ BETA, C( 1 ), NK )
CALL ZGEMM( 'N', 'C', NK, NK, K, CALPHA, A( 1, 1 ),
- + LDA, A( NK+1, 1 ), LDA, CBETA,
- + C( ( ( NK+1 )*NK )+1 ), NK )
+ $ LDA, A( NK+1, 1 ), LDA, CBETA,
+ $ C( ( ( NK+1 )*NK )+1 ), NK )
*
ELSE
*
* N is even, TRANSR = 'C', UPLO = 'L', and TRANS = 'C'
*
CALL ZHERK( 'U', 'C', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK+1 ), NK )
+ $ BETA, C( NK+1 ), NK )
CALL ZHERK( 'L', 'C', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( 1 ), NK )
+ $ BETA, C( 1 ), NK )
CALL ZGEMM( 'C', 'N', NK, NK, K, CALPHA, A( 1, 1 ),
- + LDA, A( 1, NK+1 ), LDA, CBETA,
- + C( ( ( NK+1 )*NK )+1 ), NK )
+ $ LDA, A( 1, NK+1 ), LDA, CBETA,
+ $ C( ( ( NK+1 )*NK )+1 ), NK )
*
END IF
*
@@ -447,22 +445,22 @@
* N is even, TRANSR = 'C', UPLO = 'U', and TRANS = 'N'
*
CALL ZHERK( 'U', 'N', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK*( NK+1 )+1 ), NK )
+ $ BETA, C( NK*( NK+1 )+1 ), NK )
CALL ZHERK( 'L', 'N', NK, K, ALPHA, A( NK+1, 1 ), LDA,
- + BETA, C( NK*NK+1 ), NK )
+ $ BETA, C( NK*NK+1 ), NK )
CALL ZGEMM( 'N', 'C', NK, NK, K, CALPHA, A( NK+1, 1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), NK )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), NK )
*
ELSE
*
* N is even, TRANSR = 'C', UPLO = 'U', and TRANS = 'C'
*
CALL ZHERK( 'U', 'C', NK, K, ALPHA, A( 1, 1 ), LDA,
- + BETA, C( NK*( NK+1 )+1 ), NK )
+ $ BETA, C( NK*( NK+1 )+1 ), NK )
CALL ZHERK( 'L', 'C', NK, K, ALPHA, A( 1, NK+1 ), LDA,
- + BETA, C( NK*NK+1 ), NK )
+ $ BETA, C( NK*NK+1 ), NK )
CALL ZGEMM( 'C', 'N', NK, NK, K, CALPHA, A( 1, NK+1 ),
- + LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), NK )
+ $ LDA, A( 1, 1 ), LDA, CBETA, C( 1 ), NK )
*
END IF
*
diff --git a/SRC/zlag2c.f b/SRC/zlag2c.f
index 445c4071..4d4f94b5 100644
--- a/SRC/zlag2c.f
+++ b/SRC/zlag2c.f
@@ -53,7 +53,7 @@
* PRECISION overflow threshold, in this case, the content
* of SA in exit is unspecified.
*
-* =========
+* =====================================================================
*
* .. Local Scalars ..
INTEGER I, J
@@ -72,9 +72,9 @@
DO 20 J = 1, N
DO 10 I = 1, M
IF( ( DBLE( A( I, J ) ).LT.-RMAX ) .OR.
- + ( DBLE( A( I, J ) ).GT.RMAX ) .OR.
- + ( DIMAG( A( I, J ) ).LT.-RMAX ) .OR.
- + ( DIMAG( A( I, J ) ).GT.RMAX ) ) THEN
+ $ ( DBLE( A( I, J ) ).GT.RMAX ) .OR.
+ $ ( DIMAG( A( I, J ) ).LT.-RMAX ) .OR.
+ $ ( DIMAG( A( I, J ) ).GT.RMAX ) ) THEN
INFO = 1
GO TO 30
END IF
diff --git a/SRC/zlanhf.f b/SRC/zlanhf.f
index 25cb0020..ae5ddff4 100644
--- a/SRC/zlanhf.f
+++ b/SRC/zlanhf.f
@@ -227,19 +227,19 @@
*
NOE = 1
IF( MOD( N, 2 ).EQ.0 )
- + NOE = 0
+ $ NOE = 0
*
* set ifm = 0 when form='C' or 'c' and 1 otherwise
*
IFM = 1
IF( LSAME( TRANSR, 'C' ) )
- + IFM = 0
+ $ IFM = 0
*
* set ilu = 0 when uplo='U or 'u' and 1 otherwise
*
ILU = 1
IF( LSAME( UPLO, 'U' ) )
- + ILU = 0
+ $ ILU = 0
*
* set lda = (n+1)/2 when ifm = 0
* set lda = n when ifm = 1 and noe = 1
@@ -498,7 +498,7 @@
END IF
END IF
ELSE IF( ( LSAME( NORM, 'I' ) ) .OR. ( LSAME( NORM, 'O' ) ) .OR.
- + ( NORM.EQ.'1' ) ) THEN
+ $ ( NORM.EQ.'1' ) ) THEN
*
* Find normI(A) ( = norm1(A), since A is Hermitian).
*
@@ -524,7 +524,7 @@
* -> A(j+k,j+k)
WORK( J+K ) = S + AA
IF( I.EQ.K+K )
- + GO TO 10
+ $ GO TO 10
I = I + 1
AA = ABS( DBLE( A( I+J*LDA ) ) )
* -> A(j,j)
@@ -1037,7 +1037,7 @@
END DO
DO J = 0, K - 2
CALL ZLASSQ( K-J-1, A( J+1+( J+K-1 )*LDA ), 1,
- + SCALE, S )
+ $ SCALE, S )
* L at A(0,k-1)
END DO
S = S + S
@@ -1226,7 +1226,7 @@
END DO
DO J = 0, K - 2
CALL ZLASSQ( K-J-1, A( J+1+( J+K )*LDA ), 1, SCALE,
- + S )
+ $ S )
* L at A(0,k)
END DO
S = S + S
diff --git a/SRC/zlarrv.f b/SRC/zlarrv.f
index 201f83fa..854872f2 100644
--- a/SRC/zlarrv.f
+++ b/SRC/zlarrv.f
@@ -338,7 +338,7 @@
* high relative accuracy is required for the computation of the
* corresponding eigenvectors.
CALL DCOPY( IM, W( WBEGIN ), 1,
- & WORK( WBEGIN ), 1 )
+ $ WORK( WBEGIN ), 1 )
* We store in W the eigenvalue approximations w.r.t. the original
* matrix T.
@@ -441,7 +441,7 @@
Q = INDEXW( WBEGIN-1+OLDLST )
* Offset for the arrays WORK, WGAP and WERR, i.e., the P-OFFSET
* through the Q-OFFSET elements of these arrays are to be used.
-C OFFSET = P-OLDFST
+* OFFSET = P-OLDFST
OFFSET = INDEXW( WBEGIN ) - 1
* perform limited bisection (if necessary) to get approximate
* eigenvalues to the precision needed.
@@ -580,7 +580,7 @@ C OFFSET = P-OLDFST
* Compute RRR of child cluster.
* Note that the new RRR is stored in Z
*
-C DLARRF needs LWORK = 2*N
+* DLARRF needs LWORK = 2*N
CALL DLARRF( IN, D( IBEGIN ), L( IBEGIN ),
$ WORK(INDLD+IBEGIN-1),
$ NEWFST, NEWLST, WORK(WBEGIN),
diff --git a/SRC/zlat2c.f b/SRC/zlat2c.f
index 8a7fbcfe..674b2c53 100644
--- a/SRC/zlat2c.f
+++ b/SRC/zlat2c.f
@@ -56,7 +56,7 @@
* PRECISION overflow threshold, in this case, the content
* of the UPLO part of SA in exit is unspecified.
*
-* =========
+* =====================================================================
*
* .. Local Scalars ..
INTEGER I, J
@@ -79,9 +79,9 @@
DO 20 J = 1, N
DO 10 I = 1, J
IF( ( DBLE( A( I, J ) ).LT.-RMAX ) .OR.
- + ( DBLE( A( I, J ) ).GT.RMAX ) .OR.
- + ( DIMAG( A( I, J ) ).LT.-RMAX ) .OR.
- + ( DIMAG( A( I, J ) ).GT.RMAX ) ) THEN
+ $ ( DBLE( A( I, J ) ).GT.RMAX ) .OR.
+ $ ( DIMAG( A( I, J ) ).LT.-RMAX ) .OR.
+ $ ( DIMAG( A( I, J ) ).GT.RMAX ) ) THEN
INFO = 1
GO TO 50
END IF
@@ -92,9 +92,9 @@
DO 40 J = 1, N
DO 30 I = J, N
IF( ( DBLE( A( I, J ) ).LT.-RMAX ) .OR.
- + ( DBLE( A( I, J ) ).GT.RMAX ) .OR.
- + ( DIMAG( A( I, J ) ).LT.-RMAX ) .OR.
- + ( DIMAG( A( I, J ) ).GT.RMAX ) ) THEN
+ $ ( DBLE( A( I, J ) ).GT.RMAX ) .OR.
+ $ ( DIMAG( A( I, J ) ).LT.-RMAX ) .OR.
+ $ ( DIMAG( A( I, J ) ).GT.RMAX ) ) THEN
INFO = 1
GO TO 50
END IF
diff --git a/SRC/zpftrf.f b/SRC/zpftrf.f
index 89151c62..b2509072 100644
--- a/SRC/zpftrf.f
+++ b/SRC/zpftrf.f
@@ -214,7 +214,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -254,14 +254,14 @@
*
CALL ZPOTRF( 'L', N1, A( 0 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRSM( 'R', 'L', 'C', 'N', N2, N1, CONE, A( 0 ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL ZHERK( 'U', 'N', N2, N1, -ONE, A( N1 ), N, ONE,
- + A( N ), N )
+ $ A( N ), N )
CALL ZPOTRF( 'U', N2, A( N ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
ELSE
*
@@ -271,14 +271,14 @@
*
CALL ZPOTRF( 'L', N1, A( N2 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRSM( 'L', 'L', 'N', 'N', N1, N2, CONE, A( N2 ), N,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL ZHERK( 'U', 'C', N2, N1, -ONE, A( 0 ), N, ONE,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL ZPOTRF( 'U', N2, A( N1 ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
END IF
*
@@ -294,14 +294,14 @@
*
CALL ZPOTRF( 'U', N1, A( 0 ), N1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRSM( 'L', 'U', 'C', 'N', N1, N2, CONE, A( 0 ), N1,
- + A( N1*N1 ), N1 )
+ $ A( N1*N1 ), N1 )
CALL ZHERK( 'L', 'C', N2, N1, -ONE, A( N1*N1 ), N1, ONE,
- + A( 1 ), N1 )
+ $ A( 1 ), N1 )
CALL ZPOTRF( 'L', N2, A( 1 ), N1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
ELSE
*
@@ -311,14 +311,14 @@
*
CALL ZPOTRF( 'U', N1, A( N2*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRSM( 'R', 'U', 'N', 'N', N2, N1, CONE, A( N2*N2 ),
- + N2, A( 0 ), N2 )
+ $ N2, A( 0 ), N2 )
CALL ZHERK( 'L', 'N', N2, N1, -ONE, A( 0 ), N2, ONE,
- + A( N1*N2 ), N2 )
+ $ A( N1*N2 ), N2 )
CALL ZPOTRF( 'L', N2, A( N1*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
*
END IF
*
@@ -340,14 +340,14 @@
*
CALL ZPOTRF( 'L', K, A( 1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRSM( 'R', 'L', 'C', 'N', K, K, CONE, A( 1 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL ZHERK( 'U', 'N', K, K, -ONE, A( K+1 ), N+1, ONE,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
CALL ZPOTRF( 'U', K, A( 0 ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
ELSE
*
@@ -357,14 +357,14 @@
*
CALL ZPOTRF( 'L', K, A( K+1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRSM( 'L', 'L', 'N', 'N', K, K, CONE, A( K+1 ),
- + N+1, A( 0 ), N+1 )
+ $ N+1, A( 0 ), N+1 )
CALL ZHERK( 'U', 'C', K, K, -ONE, A( 0 ), N+1, ONE,
- + A( K ), N+1 )
+ $ A( K ), N+1 )
CALL ZPOTRF( 'U', K, A( K ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
END IF
*
@@ -380,14 +380,14 @@
*
CALL ZPOTRF( 'U', K, A( 0+K ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRSM( 'L', 'U', 'C', 'N', K, K, CONE, A( K ), N1,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL ZHERK( 'L', 'C', K, K, -ONE, A( K*( K+1 ) ), K, ONE,
- + A( 0 ), K )
+ $ A( 0 ), K )
CALL ZPOTRF( 'L', K, A( 0 ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
ELSE
*
@@ -397,14 +397,14 @@
*
CALL ZPOTRF( 'U', K, A( K*( K+1 ) ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRSM( 'R', 'U', 'N', 'N', K, K, CONE,
- + A( K*( K+1 ) ), K, A( 0 ), K )
+ $ A( K*( K+1 ) ), K, A( 0 ), K )
CALL ZHERK( 'L', 'N', K, K, -ONE, A( 0 ), K, ONE,
- + A( K*K ), K )
+ $ A( K*K ), K )
CALL ZPOTRF( 'L', K, A( K*K ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
*
END IF
*
diff --git a/SRC/zpftri.f b/SRC/zpftri.f
index 60d8ba88..91eb9a83 100644
--- a/SRC/zpftri.f
+++ b/SRC/zpftri.f
@@ -206,13 +206,13 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* Invert the triangular Cholesky factor U or L.
*
CALL ZTFTRI( TRANSR, UPLO, 'N', N, A, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -253,9 +253,9 @@
*
CALL ZLAUUM( 'L', N1, A( 0 ), N, INFO )
CALL ZHERK( 'L', 'C', N1, N2, ONE, A( N1 ), N, ONE,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL ZTRMM( 'L', 'U', 'N', 'N', N2, N1, CONE, A( N ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
CALL ZLAUUM( 'U', N2, A( N ), N, INFO )
*
ELSE
@@ -266,9 +266,9 @@
*
CALL ZLAUUM( 'L', N1, A( N2 ), N, INFO )
CALL ZHERK( 'L', 'N', N1, N2, ONE, A( 0 ), N, ONE,
- + A( N2 ), N )
+ $ A( N2 ), N )
CALL ZTRMM( 'R', 'U', 'C', 'N', N1, N2, CONE, A( N1 ), N,
- + A( 0 ), N )
+ $ A( 0 ), N )
CALL ZLAUUM( 'U', N2, A( N1 ), N, INFO )
*
END IF
@@ -284,9 +284,9 @@
*
CALL ZLAUUM( 'U', N1, A( 0 ), N1, INFO )
CALL ZHERK( 'U', 'N', N1, N2, ONE, A( N1*N1 ), N1, ONE,
- + A( 0 ), N1 )
+ $ A( 0 ), N1 )
CALL ZTRMM( 'R', 'L', 'N', 'N', N1, N2, CONE, A( 1 ), N1,
- + A( N1*N1 ), N1 )
+ $ A( N1*N1 ), N1 )
CALL ZLAUUM( 'L', N2, A( 1 ), N1, INFO )
*
ELSE
@@ -296,9 +296,9 @@
*
CALL ZLAUUM( 'U', N1, A( N2*N2 ), N2, INFO )
CALL ZHERK( 'U', 'C', N1, N2, ONE, A( 0 ), N2, ONE,
- + A( N2*N2 ), N2 )
+ $ A( N2*N2 ), N2 )
CALL ZTRMM( 'L', 'L', 'C', 'N', N2, N1, CONE, A( N1*N2 ),
- + N2, A( 0 ), N2 )
+ $ N2, A( 0 ), N2 )
CALL ZLAUUM( 'L', N2, A( N1*N2 ), N2, INFO )
*
END IF
@@ -321,9 +321,9 @@
*
CALL ZLAUUM( 'L', K, A( 1 ), N+1, INFO )
CALL ZHERK( 'L', 'C', K, K, ONE, A( K+1 ), N+1, ONE,
- + A( 1 ), N+1 )
+ $ A( 1 ), N+1 )
CALL ZTRMM( 'L', 'U', 'N', 'N', K, K, CONE, A( 0 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL ZLAUUM( 'U', K, A( 0 ), N+1, INFO )
*
ELSE
@@ -334,9 +334,9 @@
*
CALL ZLAUUM( 'L', K, A( K+1 ), N+1, INFO )
CALL ZHERK( 'L', 'N', K, K, ONE, A( 0 ), N+1, ONE,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
CALL ZTRMM( 'R', 'U', 'C', 'N', K, K, CONE, A( K ), N+1,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
CALL ZLAUUM( 'U', K, A( K ), N+1, INFO )
*
END IF
@@ -353,9 +353,9 @@
*
CALL ZLAUUM( 'U', K, A( K ), K, INFO )
CALL ZHERK( 'U', 'N', K, K, ONE, A( K*( K+1 ) ), K, ONE,
- + A( K ), K )
+ $ A( K ), K )
CALL ZTRMM( 'R', 'L', 'N', 'N', K, K, CONE, A( 0 ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL ZLAUUM( 'L', K, A( 0 ), K, INFO )
*
ELSE
@@ -366,9 +366,9 @@
*
CALL ZLAUUM( 'U', K, A( K*( K+1 ) ), K, INFO )
CALL ZHERK( 'U', 'C', K, K, ONE, A( 0 ), K, ONE,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL ZTRMM( 'L', 'L', 'C', 'N', K, K, CONE, A( K*K ), K,
- + A( 0 ), K )
+ $ A( 0 ), K )
CALL ZLAUUM( 'L', K, A( K*K ), K, INFO )
*
END IF
diff --git a/SRC/zpftrs.f b/SRC/zpftrs.f
index 2db93202..65c43f8d 100644
--- a/SRC/zpftrs.f
+++ b/SRC/zpftrs.f
@@ -207,20 +207,20 @@
* Quick return if possible
*
IF( N.EQ.0 .OR. NRHS.EQ.0 )
- + RETURN
+ $ RETURN
*
* start execution: there are two triangular solves
*
IF( LOWER ) THEN
CALL ZTFSM( TRANSR, 'L', UPLO, 'N', 'N', N, NRHS, CONE, A, B,
- + LDB )
+ $ LDB )
CALL ZTFSM( TRANSR, 'L', UPLO, 'C', 'N', N, NRHS, CONE, A, B,
- + LDB )
+ $ LDB )
ELSE
CALL ZTFSM( TRANSR, 'L', UPLO, 'C', 'N', N, NRHS, CONE, A, B,
- + LDB )
+ $ LDB )
CALL ZTFSM( TRANSR, 'L', UPLO, 'N', 'N', N, NRHS, CONE, A, B,
- + LDB )
+ $ LDB )
END IF
*
RETURN
diff --git a/SRC/ztfsm.f b/SRC/ztfsm.f
index bd2281a0..e40fbf78 100644
--- a/SRC/ztfsm.f
+++ b/SRC/ztfsm.f
@@ -1,5 +1,5 @@
SUBROUTINE ZTFSM( TRANSR, SIDE, UPLO, TRANS, DIAG, M, N, ALPHA, A,
- + B, LDB )
+ $ B, LDB )
*
* -- LAPACK routine (version 3.3.0) --
*
@@ -231,15 +231,16 @@
* -- -- -- -- -- -- -- -- --
* 04 14 24 34 44 43 44 22 32 42 52
*
+* =====================================================================
* ..
* .. Parameters ..
COMPLEX*16 CONE, CZERO
PARAMETER ( CONE = ( 1.0D+0, 0.0D+0 ),
- + CZERO = ( 0.0D+0, 0.0D+0 ) )
+ $ CZERO = ( 0.0D+0, 0.0D+0 ) )
* ..
* .. Local Scalars ..
LOGICAL LOWER, LSIDE, MISODD, NISODD, NORMALTRANSR,
- + NOTRANS
+ $ NOTRANS
INTEGER M1, M2, N1, N2, K, INFO, I, J
* ..
* .. External Functions ..
@@ -270,7 +271,7 @@
ELSE IF( .NOT.NOTRANS .AND. .NOT.LSAME( TRANS, 'C' ) ) THEN
INFO = -4
ELSE IF( .NOT.LSAME( DIAG, 'N' ) .AND. .NOT.LSAME( DIAG, 'U' ) )
- + THEN
+ $ THEN
INFO = -5
ELSE IF( M.LT.0 ) THEN
INFO = -6
@@ -287,7 +288,7 @@
* Quick return when ( (N.EQ.0).OR.(M.EQ.0) )
*
IF( ( M.EQ.0 ) .OR. ( N.EQ.0 ) )
- + RETURN
+ $ RETURN
*
* Quick return when ALPHA.EQ.(0D+0,0D+0)
*
@@ -341,14 +342,14 @@
*
IF( M.EQ.1 ) THEN
CALL ZTRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A, M, B, LDB )
+ $ A, M, B, LDB )
ELSE
CALL ZTRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
CALL ZGEMM( 'N', 'N', M2, N, M1, -CONE, A( M1 ),
- + M, B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ M, B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL ZTRSM( 'L', 'U', 'C', DIAG, M2, N, CONE,
- + A( M ), M, B( M1, 0 ), LDB )
+ $ A( M ), M, B( M1, 0 ), LDB )
END IF
*
ELSE
@@ -358,14 +359,14 @@
*
IF( M.EQ.1 ) THEN
CALL ZTRSM( 'L', 'L', 'C', DIAG, M1, N, ALPHA,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
ELSE
CALL ZTRSM( 'L', 'U', 'N', DIAG, M2, N, ALPHA,
- + A( M ), M, B( M1, 0 ), LDB )
+ $ A( M ), M, B( M1, 0 ), LDB )
CALL ZGEMM( 'C', 'N', M1, N, M2, -CONE, A( M1 ),
- + M, B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ M, B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL ZTRSM( 'L', 'L', 'C', DIAG, M1, N, CONE,
- + A( 0 ), M, B, LDB )
+ $ A( 0 ), M, B, LDB )
END IF
*
END IF
@@ -380,11 +381,11 @@
* TRANS = 'N'
*
CALL ZTRSM( 'L', 'L', 'N', DIAG, M1, N, ALPHA,
- + A( M2 ), M, B, LDB )
+ $ A( M2 ), M, B, LDB )
CALL ZGEMM( 'C', 'N', M2, N, M1, -CONE, A( 0 ), M,
- + B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL ZTRSM( 'L', 'U', 'C', DIAG, M2, N, CONE,
- + A( M1 ), M, B( M1, 0 ), LDB )
+ $ A( M1 ), M, B( M1, 0 ), LDB )
*
ELSE
*
@@ -392,11 +393,11 @@
* TRANS = 'C'
*
CALL ZTRSM( 'L', 'U', 'N', DIAG, M2, N, ALPHA,
- + A( M1 ), M, B( M1, 0 ), LDB )
+ $ A( M1 ), M, B( M1, 0 ), LDB )
CALL ZGEMM( 'N', 'N', M1, N, M2, -CONE, A( 0 ), M,
- + B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL ZTRSM( 'L', 'L', 'C', DIAG, M1, N, CONE,
- + A( M2 ), M, B, LDB )
+ $ A( M2 ), M, B, LDB )
*
END IF
*
@@ -417,15 +418,15 @@
*
IF( M.EQ.1 ) THEN
CALL ZTRSM( 'L', 'U', 'C', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
ELSE
CALL ZTRSM( 'L', 'U', 'C', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
CALL ZGEMM( 'C', 'N', M2, N, M1, -CONE,
- + A( M1*M1 ), M1, B, LDB, ALPHA,
- + B( M1, 0 ), LDB )
+ $ A( M1*M1 ), M1, B, LDB, ALPHA,
+ $ B( M1, 0 ), LDB )
CALL ZTRSM( 'L', 'L', 'N', DIAG, M2, N, CONE,
- + A( 1 ), M1, B( M1, 0 ), LDB )
+ $ A( 1 ), M1, B( M1, 0 ), LDB )
END IF
*
ELSE
@@ -435,15 +436,15 @@
*
IF( M.EQ.1 ) THEN
CALL ZTRSM( 'L', 'U', 'N', DIAG, M1, N, ALPHA,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
ELSE
CALL ZTRSM( 'L', 'L', 'C', DIAG, M2, N, ALPHA,
- + A( 1 ), M1, B( M1, 0 ), LDB )
+ $ A( 1 ), M1, B( M1, 0 ), LDB )
CALL ZGEMM( 'N', 'N', M1, N, M2, -CONE,
- + A( M1*M1 ), M1, B( M1, 0 ), LDB,
- + ALPHA, B, LDB )
+ $ A( M1*M1 ), M1, B( M1, 0 ), LDB,
+ $ ALPHA, B, LDB )
CALL ZTRSM( 'L', 'U', 'N', DIAG, M1, N, CONE,
- + A( 0 ), M1, B, LDB )
+ $ A( 0 ), M1, B, LDB )
END IF
*
END IF
@@ -458,11 +459,11 @@
* TRANS = 'N'
*
CALL ZTRSM( 'L', 'U', 'C', DIAG, M1, N, ALPHA,
- + A( M2*M2 ), M2, B, LDB )
+ $ A( M2*M2 ), M2, B, LDB )
CALL ZGEMM( 'N', 'N', M2, N, M1, -CONE, A( 0 ), M2,
- + B, LDB, ALPHA, B( M1, 0 ), LDB )
+ $ B, LDB, ALPHA, B( M1, 0 ), LDB )
CALL ZTRSM( 'L', 'L', 'N', DIAG, M2, N, CONE,
- + A( M1*M2 ), M2, B( M1, 0 ), LDB )
+ $ A( M1*M2 ), M2, B( M1, 0 ), LDB )
*
ELSE
*
@@ -470,11 +471,11 @@
* TRANS = 'C'
*
CALL ZTRSM( 'L', 'L', 'C', DIAG, M2, N, ALPHA,
- + A( M1*M2 ), M2, B( M1, 0 ), LDB )
+ $ A( M1*M2 ), M2, B( M1, 0 ), LDB )
CALL ZGEMM( 'C', 'N', M1, N, M2, -CONE, A( 0 ), M2,
- + B( M1, 0 ), LDB, ALPHA, B, LDB )
+ $ B( M1, 0 ), LDB, ALPHA, B, LDB )
CALL ZTRSM( 'L', 'U', 'N', DIAG, M1, N, CONE,
- + A( M2*M2 ), M2, B, LDB )
+ $ A( M2*M2 ), M2, B, LDB )
*
END IF
*
@@ -500,11 +501,11 @@
* and TRANS = 'N'
*
CALL ZTRSM( 'L', 'L', 'N', DIAG, K, N, ALPHA,
- + A( 1 ), M+1, B, LDB )
+ $ A( 1 ), M+1, B, LDB )
CALL ZGEMM( 'N', 'N', K, N, K, -CONE, A( K+1 ),
- + M+1, B, LDB, ALPHA, B( K, 0 ), LDB )
+ $ M+1, B, LDB, ALPHA, B( K, 0 ), LDB )
CALL ZTRSM( 'L', 'U', 'C', DIAG, K, N, CONE,
- + A( 0 ), M+1, B( K, 0 ), LDB )
+ $ A( 0 ), M+1, B( K, 0 ), LDB )
*
ELSE
*
@@ -512,11 +513,11 @@
* and TRANS = 'C'
*
CALL ZTRSM( 'L', 'U', 'N', DIAG, K, N, ALPHA,
- + A( 0 ), M+1, B( K, 0 ), LDB )
+ $ A( 0 ), M+1, B( K, 0 ), LDB )
CALL ZGEMM( 'C', 'N', K, N, K, -CONE, A( K+1 ),
- + M+1, B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ M+1, B( K, 0 ), LDB, ALPHA, B, LDB )
CALL ZTRSM( 'L', 'L', 'C', DIAG, K, N, CONE,
- + A( 1 ), M+1, B, LDB )
+ $ A( 1 ), M+1, B, LDB )
*
END IF
*
@@ -530,22 +531,22 @@
* and TRANS = 'N'
*
CALL ZTRSM( 'L', 'L', 'N', DIAG, K, N, ALPHA,
- + A( K+1 ), M+1, B, LDB )
+ $ A( K+1 ), M+1, B, LDB )
CALL ZGEMM( 'C', 'N', K, N, K, -CONE, A( 0 ), M+1,
- + B, LDB, ALPHA, B( K, 0 ), LDB )
+ $ B, LDB, ALPHA, B( K, 0 ), LDB )
CALL ZTRSM( 'L', 'U', 'C', DIAG, K, N, CONE,
- + A( K ), M+1, B( K, 0 ), LDB )
+ $ A( K ), M+1, B( K, 0 ), LDB )
*
ELSE
*
* SIDE ='L', N is even, TRANSR = 'N', UPLO = 'U',
* and TRANS = 'C'
CALL ZTRSM( 'L', 'U', 'N', DIAG, K, N, ALPHA,
- + A( K ), M+1, B( K, 0 ), LDB )
+ $ A( K ), M+1, B( K, 0 ), LDB )
CALL ZGEMM( 'N', 'N', K, N, K, -CONE, A( 0 ), M+1,
- + B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ B( K, 0 ), LDB, ALPHA, B, LDB )
CALL ZTRSM( 'L', 'L', 'C', DIAG, K, N, CONE,
- + A( K+1 ), M+1, B, LDB )
+ $ A( K+1 ), M+1, B, LDB )
*
END IF
*
@@ -565,12 +566,12 @@
* and TRANS = 'N'
*
CALL ZTRSM( 'L', 'U', 'C', DIAG, K, N, ALPHA,
- + A( K ), K, B, LDB )
+ $ A( K ), K, B, LDB )
CALL ZGEMM( 'C', 'N', K, N, K, -CONE,
- + A( K*( K+1 ) ), K, B, LDB, ALPHA,
- + B( K, 0 ), LDB )
+ $ A( K*( K+1 ) ), K, B, LDB, ALPHA,
+ $ B( K, 0 ), LDB )
CALL ZTRSM( 'L', 'L', 'N', DIAG, K, N, CONE,
- + A( 0 ), K, B( K, 0 ), LDB )
+ $ A( 0 ), K, B( K, 0 ), LDB )
*
ELSE
*
@@ -578,12 +579,12 @@
* and TRANS = 'C'
*
CALL ZTRSM( 'L', 'L', 'C', DIAG, K, N, ALPHA,
- + A( 0 ), K, B( K, 0 ), LDB )
+ $ A( 0 ), K, B( K, 0 ), LDB )
CALL ZGEMM( 'N', 'N', K, N, K, -CONE,
- + A( K*( K+1 ) ), K, B( K, 0 ), LDB,
- + ALPHA, B, LDB )
+ $ A( K*( K+1 ) ), K, B( K, 0 ), LDB,
+ $ ALPHA, B, LDB )
CALL ZTRSM( 'L', 'U', 'N', DIAG, K, N, CONE,
- + A( K ), K, B, LDB )
+ $ A( K ), K, B, LDB )
*
END IF
*
@@ -597,11 +598,11 @@
* and TRANS = 'N'
*
CALL ZTRSM( 'L', 'U', 'C', DIAG, K, N, ALPHA,
- + A( K*( K+1 ) ), K, B, LDB )
+ $ A( K*( K+1 ) ), K, B, LDB )
CALL ZGEMM( 'N', 'N', K, N, K, -CONE, A( 0 ), K, B,
- + LDB, ALPHA, B( K, 0 ), LDB )
+ $ LDB, ALPHA, B( K, 0 ), LDB )
CALL ZTRSM( 'L', 'L', 'N', DIAG, K, N, CONE,
- + A( K*K ), K, B( K, 0 ), LDB )
+ $ A( K*K ), K, B( K, 0 ), LDB )
*
ELSE
*
@@ -609,11 +610,11 @@
* and TRANS = 'C'
*
CALL ZTRSM( 'L', 'L', 'C', DIAG, K, N, ALPHA,
- + A( K*K ), K, B( K, 0 ), LDB )
+ $ A( K*K ), K, B( K, 0 ), LDB )
CALL ZGEMM( 'C', 'N', K, N, K, -CONE, A( 0 ), K,
- + B( K, 0 ), LDB, ALPHA, B, LDB )
+ $ B( K, 0 ), LDB, ALPHA, B, LDB )
CALL ZTRSM( 'L', 'U', 'N', DIAG, K, N, CONE,
- + A( K*( K+1 ) ), K, B, LDB )
+ $ A( K*( K+1 ) ), K, B, LDB )
*
END IF
*
@@ -663,12 +664,12 @@
* TRANS = 'N'
*
CALL ZTRSM( 'R', 'U', 'C', DIAG, M, N2, ALPHA,
- + A( N ), N, B( 0, N1 ), LDB )
+ $ A( N ), N, B( 0, N1 ), LDB )
CALL ZGEMM( 'N', 'N', M, N1, N2, -CONE, B( 0, N1 ),
- + LDB, A( N1 ), N, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( N1 ), N, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL ZTRSM( 'R', 'L', 'N', DIAG, M, N1, CONE,
- + A( 0 ), N, B( 0, 0 ), LDB )
+ $ A( 0 ), N, B( 0, 0 ), LDB )
*
ELSE
*
@@ -676,12 +677,12 @@
* TRANS = 'C'
*
CALL ZTRSM( 'R', 'L', 'C', DIAG, M, N1, ALPHA,
- + A( 0 ), N, B( 0, 0 ), LDB )
+ $ A( 0 ), N, B( 0, 0 ), LDB )
CALL ZGEMM( 'N', 'C', M, N2, N1, -CONE, B( 0, 0 ),
- + LDB, A( N1 ), N, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( N1 ), N, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL ZTRSM( 'R', 'U', 'N', DIAG, M, N2, CONE,
- + A( N ), N, B( 0, N1 ), LDB )
+ $ A( N ), N, B( 0, N1 ), LDB )
*
END IF
*
@@ -695,12 +696,12 @@
* TRANS = 'N'
*
CALL ZTRSM( 'R', 'L', 'C', DIAG, M, N1, ALPHA,
- + A( N2 ), N, B( 0, 0 ), LDB )
+ $ A( N2 ), N, B( 0, 0 ), LDB )
CALL ZGEMM( 'N', 'N', M, N2, N1, -CONE, B( 0, 0 ),
- + LDB, A( 0 ), N, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( 0 ), N, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL ZTRSM( 'R', 'U', 'N', DIAG, M, N2, CONE,
- + A( N1 ), N, B( 0, N1 ), LDB )
+ $ A( N1 ), N, B( 0, N1 ), LDB )
*
ELSE
*
@@ -708,11 +709,11 @@
* TRANS = 'C'
*
CALL ZTRSM( 'R', 'U', 'C', DIAG, M, N2, ALPHA,
- + A( N1 ), N, B( 0, N1 ), LDB )
+ $ A( N1 ), N, B( 0, N1 ), LDB )
CALL ZGEMM( 'N', 'C', M, N1, N2, -CONE, B( 0, N1 ),
- + LDB, A( 0 ), N, ALPHA, B( 0, 0 ), LDB )
+ $ LDB, A( 0 ), N, ALPHA, B( 0, 0 ), LDB )
CALL ZTRSM( 'R', 'L', 'N', DIAG, M, N1, CONE,
- + A( N2 ), N, B( 0, 0 ), LDB )
+ $ A( N2 ), N, B( 0, 0 ), LDB )
*
END IF
*
@@ -732,12 +733,12 @@
* TRANS = 'N'
*
CALL ZTRSM( 'R', 'L', 'N', DIAG, M, N2, ALPHA,
- + A( 1 ), N1, B( 0, N1 ), LDB )
+ $ A( 1 ), N1, B( 0, N1 ), LDB )
CALL ZGEMM( 'N', 'C', M, N1, N2, -CONE, B( 0, N1 ),
- + LDB, A( N1*N1 ), N1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( N1*N1 ), N1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL ZTRSM( 'R', 'U', 'C', DIAG, M, N1, CONE,
- + A( 0 ), N1, B( 0, 0 ), LDB )
+ $ A( 0 ), N1, B( 0, 0 ), LDB )
*
ELSE
*
@@ -745,12 +746,12 @@
* TRANS = 'C'
*
CALL ZTRSM( 'R', 'U', 'N', DIAG, M, N1, ALPHA,
- + A( 0 ), N1, B( 0, 0 ), LDB )
+ $ A( 0 ), N1, B( 0, 0 ), LDB )
CALL ZGEMM( 'N', 'N', M, N2, N1, -CONE, B( 0, 0 ),
- + LDB, A( N1*N1 ), N1, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( N1*N1 ), N1, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL ZTRSM( 'R', 'L', 'C', DIAG, M, N2, CONE,
- + A( 1 ), N1, B( 0, N1 ), LDB )
+ $ A( 1 ), N1, B( 0, N1 ), LDB )
*
END IF
*
@@ -764,12 +765,12 @@
* TRANS = 'N'
*
CALL ZTRSM( 'R', 'U', 'N', DIAG, M, N1, ALPHA,
- + A( N2*N2 ), N2, B( 0, 0 ), LDB )
+ $ A( N2*N2 ), N2, B( 0, 0 ), LDB )
CALL ZGEMM( 'N', 'C', M, N2, N1, -CONE, B( 0, 0 ),
- + LDB, A( 0 ), N2, ALPHA, B( 0, N1 ),
- + LDB )
+ $ LDB, A( 0 ), N2, ALPHA, B( 0, N1 ),
+ $ LDB )
CALL ZTRSM( 'R', 'L', 'C', DIAG, M, N2, CONE,
- + A( N1*N2 ), N2, B( 0, N1 ), LDB )
+ $ A( N1*N2 ), N2, B( 0, N1 ), LDB )
*
ELSE
*
@@ -777,12 +778,12 @@
* TRANS = 'C'
*
CALL ZTRSM( 'R', 'L', 'N', DIAG, M, N2, ALPHA,
- + A( N1*N2 ), N2, B( 0, N1 ), LDB )
+ $ A( N1*N2 ), N2, B( 0, N1 ), LDB )
CALL ZGEMM( 'N', 'N', M, N1, N2, -CONE, B( 0, N1 ),
- + LDB, A( 0 ), N2, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( 0 ), N2, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL ZTRSM( 'R', 'U', 'C', DIAG, M, N1, CONE,
- + A( N2*N2 ), N2, B( 0, 0 ), LDB )
+ $ A( N2*N2 ), N2, B( 0, 0 ), LDB )
*
END IF
*
@@ -808,12 +809,12 @@
* and TRANS = 'N'
*
CALL ZTRSM( 'R', 'U', 'C', DIAG, M, K, ALPHA,
- + A( 0 ), N+1, B( 0, K ), LDB )
+ $ A( 0 ), N+1, B( 0, K ), LDB )
CALL ZGEMM( 'N', 'N', M, K, K, -CONE, B( 0, K ),
- + LDB, A( K+1 ), N+1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( K+1 ), N+1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL ZTRSM( 'R', 'L', 'N', DIAG, M, K, CONE,
- + A( 1 ), N+1, B( 0, 0 ), LDB )
+ $ A( 1 ), N+1, B( 0, 0 ), LDB )
*
ELSE
*
@@ -821,12 +822,12 @@
* and TRANS = 'C'
*
CALL ZTRSM( 'R', 'L', 'C', DIAG, M, K, ALPHA,
- + A( 1 ), N+1, B( 0, 0 ), LDB )
+ $ A( 1 ), N+1, B( 0, 0 ), LDB )
CALL ZGEMM( 'N', 'C', M, K, K, -CONE, B( 0, 0 ),
- + LDB, A( K+1 ), N+1, ALPHA, B( 0, K ),
- + LDB )
+ $ LDB, A( K+1 ), N+1, ALPHA, B( 0, K ),
+ $ LDB )
CALL ZTRSM( 'R', 'U', 'N', DIAG, M, K, CONE,
- + A( 0 ), N+1, B( 0, K ), LDB )
+ $ A( 0 ), N+1, B( 0, K ), LDB )
*
END IF
*
@@ -840,12 +841,12 @@
* and TRANS = 'N'
*
CALL ZTRSM( 'R', 'L', 'C', DIAG, M, K, ALPHA,
- + A( K+1 ), N+1, B( 0, 0 ), LDB )
+ $ A( K+1 ), N+1, B( 0, 0 ), LDB )
CALL ZGEMM( 'N', 'N', M, K, K, -CONE, B( 0, 0 ),
- + LDB, A( 0 ), N+1, ALPHA, B( 0, K ),
- + LDB )
+ $ LDB, A( 0 ), N+1, ALPHA, B( 0, K ),
+ $ LDB )
CALL ZTRSM( 'R', 'U', 'N', DIAG, M, K, CONE,
- + A( K ), N+1, B( 0, K ), LDB )
+ $ A( K ), N+1, B( 0, K ), LDB )
*
ELSE
*
@@ -853,12 +854,12 @@
* and TRANS = 'C'
*
CALL ZTRSM( 'R', 'U', 'C', DIAG, M, K, ALPHA,
- + A( K ), N+1, B( 0, K ), LDB )
+ $ A( K ), N+1, B( 0, K ), LDB )
CALL ZGEMM( 'N', 'C', M, K, K, -CONE, B( 0, K ),
- + LDB, A( 0 ), N+1, ALPHA, B( 0, 0 ),
- + LDB )
+ $ LDB, A( 0 ), N+1, ALPHA, B( 0, 0 ),
+ $ LDB )
CALL ZTRSM( 'R', 'L', 'N', DIAG, M, K, CONE,
- + A( K+1 ), N+1, B( 0, 0 ), LDB )
+ $ A( K+1 ), N+1, B( 0, 0 ), LDB )
*
END IF
*
@@ -878,12 +879,12 @@
* and TRANS = 'N'
*
CALL ZTRSM( 'R', 'L', 'N', DIAG, M, K, ALPHA,
- + A( 0 ), K, B( 0, K ), LDB )
+ $ A( 0 ), K, B( 0, K ), LDB )
CALL ZGEMM( 'N', 'C', M, K, K, -CONE, B( 0, K ),
- + LDB, A( ( K+1 )*K ), K, ALPHA,
- + B( 0, 0 ), LDB )
+ $ LDB, A( ( K+1 )*K ), K, ALPHA,
+ $ B( 0, 0 ), LDB )
CALL ZTRSM( 'R', 'U', 'C', DIAG, M, K, CONE,
- + A( K ), K, B( 0, 0 ), LDB )
+ $ A( K ), K, B( 0, 0 ), LDB )
*
ELSE
*
@@ -891,12 +892,12 @@
* and TRANS = 'C'
*
CALL ZTRSM( 'R', 'U', 'N', DIAG, M, K, ALPHA,
- + A( K ), K, B( 0, 0 ), LDB )
+ $ A( K ), K, B( 0, 0 ), LDB )
CALL ZGEMM( 'N', 'N', M, K, K, -CONE, B( 0, 0 ),
- + LDB, A( ( K+1 )*K ), K, ALPHA,
- + B( 0, K ), LDB )
+ $ LDB, A( ( K+1 )*K ), K, ALPHA,
+ $ B( 0, K ), LDB )
CALL ZTRSM( 'R', 'L', 'C', DIAG, M, K, CONE,
- + A( 0 ), K, B( 0, K ), LDB )
+ $ A( 0 ), K, B( 0, K ), LDB )
*
END IF
*
@@ -910,11 +911,11 @@
* and TRANS = 'N'
*
CALL ZTRSM( 'R', 'U', 'N', DIAG, M, K, ALPHA,
- + A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
+ $ A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
CALL ZGEMM( 'N', 'C', M, K, K, -CONE, B( 0, 0 ),
- + LDB, A( 0 ), K, ALPHA, B( 0, K ), LDB )
+ $ LDB, A( 0 ), K, ALPHA, B( 0, K ), LDB )
CALL ZTRSM( 'R', 'L', 'C', DIAG, M, K, CONE,
- + A( K*K ), K, B( 0, K ), LDB )
+ $ A( K*K ), K, B( 0, K ), LDB )
*
ELSE
*
@@ -922,11 +923,11 @@
* and TRANS = 'C'
*
CALL ZTRSM( 'R', 'L', 'N', DIAG, M, K, ALPHA,
- + A( K*K ), K, B( 0, K ), LDB )
+ $ A( K*K ), K, B( 0, K ), LDB )
CALL ZGEMM( 'N', 'N', M, K, K, -CONE, B( 0, K ),
- + LDB, A( 0 ), K, ALPHA, B( 0, 0 ), LDB )
+ $ LDB, A( 0 ), K, ALPHA, B( 0, 0 ), LDB )
CALL ZTRSM( 'R', 'U', 'C', DIAG, M, K, CONE,
- + A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
+ $ A( ( K+1 )*K ), K, B( 0, 0 ), LDB )
*
END IF
*
diff --git a/SRC/ztftri.f b/SRC/ztftri.f
index ce35fb38..529f0567 100644
--- a/SRC/ztftri.f
+++ b/SRC/ztftri.f
@@ -201,7 +201,7 @@
ELSE IF( .NOT.LOWER .AND. .NOT.LSAME( UPLO, 'U' ) ) THEN
INFO = -2
ELSE IF( .NOT.LSAME( DIAG, 'N' ) .AND. .NOT.LSAME( DIAG, 'U' ) )
- + THEN
+ $ THEN
INFO = -3
ELSE IF( N.LT.0 ) THEN
INFO = -4
@@ -214,7 +214,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
* If N is odd, set NISODD = .TRUE.
* If N is even, set K = N/2 and NISODD = .FALSE.
@@ -255,16 +255,16 @@
*
CALL ZTRTRI( 'L', DIAG, N1, A( 0 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'R', 'L', 'N', DIAG, N2, N1, -CONE, A( 0 ),
- + N, A( N1 ), N )
+ $ N, A( N1 ), N )
CALL ZTRTRI( 'U', DIAG, N2, A( N ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'L', 'U', 'C', DIAG, N2, N1, CONE, A( N ), N,
- + A( N1 ), N )
+ $ A( N1 ), N )
*
ELSE
*
@@ -274,16 +274,16 @@
*
CALL ZTRTRI( 'L', DIAG, N1, A( N2 ), N, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'L', 'L', 'C', DIAG, N1, N2, -CONE, A( N2 ),
- + N, A( 0 ), N )
+ $ N, A( 0 ), N )
CALL ZTRTRI( 'U', DIAG, N2, A( N1 ), N, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'R', 'U', 'N', DIAG, N1, N2, CONE, A( N1 ),
- + N, A( 0 ), N )
+ $ N, A( 0 ), N )
*
END IF
*
@@ -298,16 +298,16 @@
*
CALL ZTRTRI( 'U', DIAG, N1, A( 0 ), N1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'L', 'U', 'N', DIAG, N1, N2, -CONE, A( 0 ),
- + N1, A( N1*N1 ), N1 )
+ $ N1, A( N1*N1 ), N1 )
CALL ZTRTRI( 'L', DIAG, N2, A( 1 ), N1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'R', 'L', 'C', DIAG, N1, N2, CONE, A( 1 ),
- + N1, A( N1*N1 ), N1 )
+ $ N1, A( N1*N1 ), N1 )
*
ELSE
*
@@ -316,16 +316,16 @@
*
CALL ZTRTRI( 'U', DIAG, N1, A( N2*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'R', 'U', 'C', DIAG, N2, N1, -CONE,
- + A( N2*N2 ), N2, A( 0 ), N2 )
+ $ A( N2*N2 ), N2, A( 0 ), N2 )
CALL ZTRTRI( 'L', DIAG, N2, A( N1*N2 ), N2, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + N1
+ $ INFO = INFO + N1
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'L', 'L', 'N', DIAG, N2, N1, CONE,
- + A( N1*N2 ), N2, A( 0 ), N2 )
+ $ A( N1*N2 ), N2, A( 0 ), N2 )
END IF
*
END IF
@@ -346,16 +346,16 @@
*
CALL ZTRTRI( 'L', DIAG, K, A( 1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'R', 'L', 'N', DIAG, K, K, -CONE, A( 1 ),
- + N+1, A( K+1 ), N+1 )
+ $ N+1, A( K+1 ), N+1 )
CALL ZTRTRI( 'U', DIAG, K, A( 0 ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'L', 'U', 'C', DIAG, K, K, CONE, A( 0 ), N+1,
- + A( K+1 ), N+1 )
+ $ A( K+1 ), N+1 )
*
ELSE
*
@@ -365,16 +365,16 @@
*
CALL ZTRTRI( 'L', DIAG, K, A( K+1 ), N+1, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'L', 'L', 'C', DIAG, K, K, -CONE, A( K+1 ),
- + N+1, A( 0 ), N+1 )
+ $ N+1, A( 0 ), N+1 )
CALL ZTRTRI( 'U', DIAG, K, A( K ), N+1, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'R', 'U', 'N', DIAG, K, K, CONE, A( K ), N+1,
- + A( 0 ), N+1 )
+ $ A( 0 ), N+1 )
END IF
ELSE
*
@@ -388,16 +388,16 @@
*
CALL ZTRTRI( 'U', DIAG, K, A( K ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'L', 'U', 'N', DIAG, K, K, -CONE, A( K ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
CALL ZTRTRI( 'L', DIAG, K, A( 0 ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'R', 'L', 'C', DIAG, K, K, CONE, A( 0 ), K,
- + A( K*( K+1 ) ), K )
+ $ A( K*( K+1 ) ), K )
ELSE
*
* SRPA for UPPER, TRANSPOSE and N is even (see paper)
@@ -406,16 +406,16 @@
*
CALL ZTRTRI( 'U', DIAG, K, A( K*( K+1 ) ), K, INFO )
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'R', 'U', 'C', DIAG, K, K, -CONE,
- + A( K*( K+1 ) ), K, A( 0 ), K )
+ $ A( K*( K+1 ) ), K, A( 0 ), K )
CALL ZTRTRI( 'L', DIAG, K, A( K*K ), K, INFO )
IF( INFO.GT.0 )
- + INFO = INFO + K
+ $ INFO = INFO + K
IF( INFO.GT.0 )
- + RETURN
+ $ RETURN
CALL ZTRMM( 'L', 'L', 'N', DIAG, K, K, CONE, A( K*K ), K,
- + A( 0 ), K )
+ $ A( 0 ), K )
END IF
END IF
END IF
diff --git a/SRC/ztfttp.f b/SRC/ztfttp.f
index c7577eed..1e0cb6ae 100644
--- a/SRC/ztfttp.f
+++ b/SRC/ztfttp.f
@@ -200,7 +200,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
IF( N.EQ.1 ) THEN
IF( NORMALTRANSR ) THEN
@@ -243,7 +243,7 @@
* ARF^C has lda rows and n+1-noe cols
*
IF( .NOT.NORMALTRANSR )
- + LDA = ( N+1 ) / 2
+ $ LDA = ( N+1 ) / 2
*
* start execution: there are eight cases
*
diff --git a/SRC/ztfttr.f b/SRC/ztfttr.f
index 65d66fea..f70cc6c6 100644
--- a/SRC/ztfttr.f
+++ b/SRC/ztfttr.f
@@ -236,11 +236,11 @@
K = N / 2
NISODD = .FALSE.
IF( .NOT.LOWER )
- + NP1X2 = N + N + 2
+ $ NP1X2 = N + N + 2
ELSE
NISODD = .TRUE.
IF( .NOT.LOWER )
- + NX2 = N + N
+ $ NX2 = N + N
END IF
*
IF( NISODD ) THEN
diff --git a/SRC/ztpttf.f b/SRC/ztpttf.f
index 44e3c2c9..26039092 100644
--- a/SRC/ztpttf.f
+++ b/SRC/ztpttf.f
@@ -198,7 +198,7 @@
* Quick return if possible
*
IF( N.EQ.0 )
- + RETURN
+ $ RETURN
*
IF( N.EQ.1 ) THEN
IF( NORMALTRANSR ) THEN
@@ -241,7 +241,7 @@
* ARF^C has lda rows and n+1-noe cols
*
IF( .NOT.NORMALTRANSR )
- + LDA = ( N+1 ) / 2
+ $ LDA = ( N+1 ) / 2
*
* start execution: there are eight cases
*
diff --git a/SRC/ztrttf.f b/SRC/ztrttf.f
index ba39ffd5..fdc626ca 100644
--- a/SRC/ztrttf.f
+++ b/SRC/ztrttf.f
@@ -235,11 +235,11 @@
K = N / 2
NISODD = .FALSE.
IF( .NOT.LOWER )
- + NP1X2 = N + N + 2
+ $ NP1X2 = N + N + 2
ELSE
NISODD = .TRUE.
IF( .NOT.LOWER )
- + NX2 = N + N
+ $ NX2 = N + N
END IF
*
IF( NISODD ) THEN