72 complex(
const double &_x,
const double &_y):
x(_x),
y(_y){};
90 if( fabs(z.y)<fabs(z.x) )
111 const complex
operator/(
const complex& lhs,
const complex& rhs);
112 bool operator==(
const complex& lhs,
const complex& rhs);
113 bool operator!=(
const complex& lhs,
const complex& rhs);
114 const complex
operator+(
const complex& lhs);
115 const complex
operator-(
const complex& lhs);
116 const complex
operator+(
const complex& lhs,
const complex& rhs);
117 const complex
operator+(
const complex& lhs,
const double& rhs);
118 const complex
operator+(
const double& lhs,
const complex& rhs);
119 const complex
operator-(
const complex& lhs,
const complex& rhs);
120 const complex
operator-(
const complex& lhs,
const double& rhs);
121 const complex
operator-(
const double& lhs,
const complex& rhs);
122 const complex
operator*(
const complex& lhs,
const complex& rhs);
123 const complex
operator*(
const complex& lhs,
const double& rhs);
124 const complex
operator*(
const double& lhs,
const complex& rhs);
125 const complex
operator/(
const complex& lhs,
const complex& rhs);
126 const complex
operator/(
const double& lhs,
const complex& rhs);
127 const complex
operator/(
const complex& lhs,
const double& rhs);
129 const complex
conj(
const complex &z);
130 const complex
csqr(
const complex &z);
144 class const_raw_vector
192 if( v1.GetStep()==1 && v2.GetStep()==1 )
198 const T *p1 = v1.GetData();
199 const T *p2 = v2.GetData();
200 int imax = v1.GetLength()/4;
202 for(
i=imax;
i!=0;
i--)
204 r += (*p1)*(*p2) + p1[1]*p2[1] + p1[2]*p2[2] + p1[3]*p2[3];
208 for(
i=0;
i<v1.GetLength()%4;
i++)
209 r += (*(p1++))*(*(p2++));
217 int offset11 = v1.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
218 int offset21 = v2.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
220 const T *p1 = v1.GetData();
221 const T *p2 = v2.GetData();
222 int imax = v1.GetLength()/4;
224 for(
i=0;
i<imax;
i++)
226 r += (*p1)*(*p2) + p1[offset11]*p2[offset21] + p1[offset12]*p2[offset22] + p1[offset13]*p2[offset23];
230 for(
i=0;
i<v1.GetLength()%4;
i++)
248 if( vdst.
GetStep()==1 && vsrc.GetStep()==1 )
254 const T *p2 = vsrc.GetData();
257 for(
i=imax;
i!=0;
i--)
273 int offset11 = vdst.
GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
274 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
276 const T *p2 = vsrc.GetData();
279 for(
i=0;
i<imax;
i++)
282 p1[offset11] = p2[offset21];
283 p1[offset12] = p2[offset22];
284 p1[offset13] = p2[offset23];
292 p2 += vsrc.GetStep();
303 void vmoveneg(raw_vector<T> vdst, const_raw_vector<T> vsrc)
306 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
311 T *p1 = vdst.GetData();
312 const T *p2 = vsrc.GetData();
313 int imax = vdst.GetLength()/2;
315 for(
i=0;
i<imax;
i++)
322 if(vdst.GetLength()%2 != 0)
331 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
332 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
333 T *p1 = vdst.GetData();
334 const T *p2 = vsrc.GetData();
335 int imax = vdst.GetLength()/4;
337 for(
i=imax;
i!=0;
i--)
340 p1[offset11] = -p2[offset21];
341 p1[offset12] = -p2[offset22];
342 p1[offset13] = -p2[offset23];
346 for(
i=0;
i<vdst.GetLength()%4;
i++)
349 p1 += vdst.GetStep();
350 p2 += vsrc.GetStep();
360 template<
class T,
class T2>
361 void vmove(raw_vector<T> vdst, const_raw_vector<T> vsrc,
T2 alpha)
364 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
369 T *p1 = vdst.GetData();
370 const T *p2 = vsrc.GetData();
371 int imax = vdst.GetLength()/4;
373 for(
i=imax;
i!=0;
i--)
382 for(
i=0;
i<vdst.GetLength()%4;
i++)
383 *(p1++) =
alpha*(*(p2++));
391 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
392 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
393 T *p1 = vdst.GetData();
394 const T *p2 = vsrc.GetData();
395 int imax = vdst.GetLength()/4;
397 for(
i=0;
i<imax;
i++)
400 p1[offset11] =
alpha*p2[offset21];
401 p1[offset12] =
alpha*p2[offset22];
402 p1[offset13] =
alpha*p2[offset23];
406 for(
i=0;
i<vdst.GetLength()%4;
i++)
409 p1 += vdst.GetStep();
410 p2 += vsrc.GetStep();
421 void vadd(raw_vector<T> vdst, const_raw_vector<T> vsrc)
424 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
429 T *p1 = vdst.GetData();
430 const T *p2 = vsrc.GetData();
431 int imax = vdst.GetLength()/4;
433 for(
i=imax;
i!=0;
i--)
442 for(
i=0;
i<vdst.GetLength()%4;
i++)
451 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
452 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
453 T *p1 = vdst.GetData();
454 const T *p2 = vsrc.GetData();
455 int imax = vdst.GetLength()/4;
457 for(
i=0;
i<imax;
i++)
460 p1[offset11] += p2[offset21];
461 p1[offset12] += p2[offset22];
462 p1[offset13] += p2[offset23];
466 for(
i=0;
i<vdst.GetLength()%4;
i++)
469 p1 += vdst.GetStep();
470 p2 += vsrc.GetStep();
480 template<
class T,
class T2>
481 void vadd(raw_vector<T> vdst, const_raw_vector<T> vsrc,
T2 alpha)
484 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
489 T *p1 = vdst.GetData();
490 const T *p2 = vsrc.GetData();
491 int imax = vdst.GetLength()/4;
493 for(
i=imax;
i!=0;
i--)
496 p1[1] +=
alpha*p2[1];
497 p1[2] +=
alpha*p2[2];
498 p1[3] +=
alpha*p2[3];
502 for(
i=0;
i<vdst.GetLength()%4;
i++)
503 *(p1++) +=
alpha*(*(p2++));
511 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
512 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
513 T *p1 = vdst.GetData();
514 const T *p2 = vsrc.GetData();
515 int imax = vdst.GetLength()/4;
517 for(
i=0;
i<imax;
i++)
520 p1[offset11] +=
alpha*p2[offset21];
521 p1[offset12] +=
alpha*p2[offset22];
522 p1[offset13] +=
alpha*p2[offset23];
526 for(
i=0;
i<vdst.GetLength()%4;
i++)
529 p1 += vdst.GetStep();
530 p2 += vsrc.GetStep();
541 void vsub(raw_vector<T> vdst, const_raw_vector<T> vsrc)
544 if( vdst.GetStep()==1 && vsrc.GetStep()==1 )
549 T *p1 = vdst.GetData();
550 const T *p2 = vsrc.GetData();
551 int imax = vdst.GetLength()/4;
553 for(
i=imax;
i!=0;
i--)
562 for(
i=0;
i<vdst.GetLength()%4;
i++)
571 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
572 int offset21 = vsrc.GetStep(), offset22 = 2*offset21, offset23 = 3*offset21, offset24 = 4*offset21;
573 T *p1 = vdst.GetData();
574 const T *p2 = vsrc.GetData();
575 int imax = vdst.GetLength()/4;
577 for(
i=0;
i<imax;
i++)
580 p1[offset11] -= p2[offset21];
581 p1[offset12] -= p2[offset22];
582 p1[offset13] -= p2[offset23];
586 for(
i=0;
i<vdst.GetLength()%4;
i++)
589 p1 += vdst.GetStep();
590 p2 += vsrc.GetStep();
600 template<
class T,
class T2>
601 void vsub(raw_vector<T> vdst, const_raw_vector<T> vsrc,
T2 alpha)
610 template<
class T,
class T2>
613 if( vdst.GetStep()==1 )
618 T *p1 = vdst.GetData();
619 int imax = vdst.GetLength()/4;
621 for(
i=imax;
i!=0;
i--)
629 for(
i=0;
i<vdst.GetLength()%4;
i++)
638 int offset11 = vdst.GetStep(), offset12 = 2*offset11, offset13 = 3*offset11, offset14 = 4*offset11;
639 T *p1 = vdst.GetData();
640 int imax = vdst.GetLength()/4;
642 for(
i=0;
i<imax;
i++)
645 p1[offset11] *=
alpha;
646 p1[offset12] *=
alpha;
647 p1[offset13] *=
alpha;
650 for(
i=0;
i<vdst.GetLength()%4;
i++)
653 p1 += vdst.GetStep();
687 #ifndef UNSAFE_MEM_COPY
712 #ifndef UNSAFE_MEM_COPY
754 void setcontent(
int iLow,
int iHigh,
const T *pContent )
757 for(
int i=iLow;
i<=iHigh;
i++)
758 (*
this)(
i) = pContent[
i-iLow];
841 #ifndef UNSAFE_MEM_COPY
868 #ifndef UNSAFE_MEM_COPY
898 void setbounds(
int iLow1,
int iHigh1,
int iLow2,
int iHigh2 )
902 m_iVecSize = (iHigh1-iLow1+1)*(iHigh2-iLow2+1);
912 void setcontent(
int iLow1,
int iHigh1,
int iLow2,
int iHigh2,
const T *pContent )
939 raw_vector<T>
getcolumn(
int iColumn,
int iRowStart,
int iRowEnd)
952 return raw_vector<T>(&((*
this)(iRow, iColumnStart)), iColumnEnd-iColumnStart+1, 1);
1006 double sqr(
double x);
1007 int maxint(
int m1,
int m2);
1008 int minint(
int m1,
int m2);
1009 double maxreal(
double m1,
double m2);
1010 double minreal(
double m1,
double m2);
1020 #include <stdexcept>
1034 class incorrectPrecision :
public exception {};
1035 class overflow :
public exception {};
1036 class divisionByZero :
public exception {};
1037 class sqrtOfNegativeNumber :
public exception {};
1038 class invalidConversion :
public exception {};
1039 class invalidString :
public exception {};
1040 class internalError :
public exception {};
1041 class domainError :
public exception {};
1062 static mpfr_record*
newMpfr(
unsigned int Precision);
1073 class mpfr_reference
1093 template<
unsigned int Precision>
1140 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
1141 template<
unsigned int Precision2>
1146 mpfr_set(
getWritePtr(), r.getReadPtr(), GMP_RNDN);
1181 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
1182 template<
unsigned int Precision2>
1185 if( (
void*)
this==(
void*)(&r) )
1187 mpfr_set(
getWritePtr(), r.getReadPtr(), GMP_RNDN);
1196 template<
class T>
ampf&
operator+=(
const T&
v){ *
this = *
this +
v;
return *
this; };
1197 template<
class T>
ampf&
operator-=(
const T&
v){ *
this = *
this -
v;
return *
this; };
1198 template<
class T>
ampf&
operator*=(
const T&
v){ *
this = *
this *
v;
return *
this; };
1199 template<
class T>
ampf&
operator/=(
const T&
v){ *
this = *
this /
v;
return *
this; };
1256 template<
unsigned int Precision>
1261 WerrorS(
"incorrectPrecision");
1264 template<
unsigned int Precision>
1269 mpfr_set_ui(getWritePtr(), 0, GMP_RNDN);
1272 template<
unsigned int Precision>
1277 mpfr_set_si(getWritePtr(), sv, GMP_RNDN);
1280 template<
unsigned int Precision>
1285 mpfr_set_ui(getWritePtr(),
v, GMP_RNDN);
1288 template<
unsigned int Precision>
1293 mpfr_set_ld(getWritePtr(),
v, GMP_RNDN);
1296 template<
unsigned int Precision>
1301 mpfr_strtofr(getWritePtr(),
s,
NULL, 0, GMP_RNDN);
1304 template<
unsigned int Precision>
1310 template<
unsigned int Precision>
1313 if( rval->refCount==1 )
1316 mpfr_set(newrval->value, rval->value, GMP_RNDN);
1322 template<
unsigned int Precision>
1325 return mpfr_number_p(getReadPtr())!=0;
1328 template<
unsigned int Precision>
1331 if( !isFiniteNumber() )
1333 return mpfr_sgn(getReadPtr())>0;
1336 template<
unsigned int Precision>
1339 return mpfr_zero_p(getReadPtr())!=0;
1342 template<
unsigned int Precision>
1345 if( !isFiniteNumber() )
1347 return mpfr_sgn(getReadPtr())<0;
1350 template<
unsigned int Precision>
1353 return getUlpOf(*
this);
1356 template<
unsigned int Precision>
1359 return mpfr_get_d(getReadPtr(), GMP_RNDN);
1362 template<
unsigned int Precision>
1368 if( !isFiniteNumber() )
1373 ptr = mpfr_get_str(
NULL, &_e, 16, 0, getReadPtr(), GMP_RNDN);
1384 signed long iexpval;
1388 ptr = mpfr_get_str(
NULL, &expval, 16, 0, getReadPtr(), GMP_RNDN);
1391 if( iexpval!=expval )
1394 sprintf(buf_e,
"%ld",
long(iexpval));
1404 mpfr_free_str(ptr2);
1408 template<
unsigned int Precision>
1416 if( !isFiniteNumber() )
1421 ptr = mpfr_get_str(
NULL, &_e, 10, 0, getReadPtr(), GMP_RNDN);
1432 signed long iexpval;
1436 ptr = mpfr_get_str(
NULL, &expval, 10, 0, getReadPtr(), GMP_RNDN);
1439 if( iexpval!=expval )
1442 sprintf(buf_e,
"%ld",
long(iexpval));
1452 mpfr_free_str(ptr2);
1455 template<
unsigned int Precision>
1458 char *toString_Block=(
char *)
omAlloc(256);
1462 if( !isFiniteNumber() )
1466 ptr = mpfr_get_str(
NULL, &_e, 10, 0, getReadPtr(), GMP_RNDN);
1467 strcpy(toString_Block, ptr);
1469 return toString_Block;
1477 signed long iexpval;
1481 ptr = mpfr_get_str(
NULL, &expval, 10, 0, getReadPtr(), GMP_RNDN);
1484 if( iexpval!=expval )
1487 sprintf(buf_e,
"%ld",
long(iexpval));
1491 sprintf(toString_Block,
"-0.%sE%s",ptr,buf_e);
1494 sprintf(toString_Block,
"0.%sE%s",ptr,buf_e);
1495 mpfr_free_str(ptr2);
1496 return toString_Block;
1499 template<
unsigned int Precision>
1502 if( !
x.isFiniteNumber() )
1506 ampf<Precision> r(1);
1507 mpfr_nextabove(r.getWritePtr());
1508 mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1512 mpfr_get_exp(
x.getReadPtr()),
1522 template<
unsigned int Precision>
1525 ampf<Precision> r(1);
1526 mpfr_nextabove(r.getWritePtr());
1527 mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1531 template<
unsigned int Precision>
1534 ampf<Precision> r(1);
1535 mpfr_nextabove(r.getWritePtr());
1536 mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1545 template<
unsigned int Precision>
1548 ampf<Precision> r(1);
1549 mpfr_nextabove(r.getWritePtr());
1550 mpfr_sub_ui(r.getWritePtr(), r.getWritePtr(), 1, GMP_RNDN);
1559 template<
unsigned int Precision>
1562 ampf<Precision> r(1);
1563 mpfr_nextbelow(r.getWritePtr());
1564 mpfr_set_exp(r.getWritePtr(),mpfr_get_emax());
1568 template<
unsigned int Precision>
1571 ampf<Precision> r(1);
1572 mpfr_set_exp(r.getWritePtr(),mpfr_get_emin());
1576 template<
unsigned int Precision>
1582 template<
unsigned int Precision>
1585 ampf<Precision> r(1);
1586 mp_exp_t e1 = mpfr_get_emax();
1587 mp_exp_t e2 = -mpfr_get_emin();
1588 mp_exp_t e = e1>e2 ? e1 : e2;
1589 mpfr_set_exp(r.getWritePtr(), e-5);
1593 template<
unsigned int Precision>
1596 ampf<Precision> r(1);
1597 mp_exp_t e1 = mpfr_get_emax();
1598 mp_exp_t e2 = -mpfr_get_emin();
1599 mp_exp_t e = e1>e2 ? e1 : e2;
1600 mpfr_set_exp(r.getWritePtr(), 2-(e-5));
1604 template<
unsigned int Precision>
1615 template<
unsigned int Precision>
1618 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())==0;
1621 template<
unsigned int Precision>
1622 bool operator!=(
const ampf<Precision>& op1,
const ampf<Precision>& op2)
1624 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())!=0;
1627 template<
unsigned int Precision>
1628 bool operator<(
const ampf<Precision>& op1,
const ampf<Precision>& op2)
1630 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())<0;
1633 template<
unsigned int Precision>
1634 bool operator>(
const ampf<Precision>& op1,
const ampf<Precision>& op2)
1636 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())>0;
1639 template<
unsigned int Precision>
1640 bool operator<=(
const ampf<Precision>& op1,
const ampf<Precision>& op2)
1642 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())<=0;
1645 template<
unsigned int Precision>
1646 bool operator>=(
const ampf<Precision>& op1,
const ampf<Precision>& op2)
1648 return mpfr_cmp(op1.getReadPtr(), op2.getReadPtr())>=0;
1654 template<
unsigned int Precision>
1655 const ampf<Precision>
operator+(
const ampf<Precision>& op1)
1660 template<
unsigned int Precision>
1661 const ampf<Precision>
operator-(
const ampf<Precision>& op1)
1664 mpfr_neg(
v->value, op1.getReadPtr(), GMP_RNDN);
1668 template<
unsigned int Precision>
1669 const ampf<Precision>
operator+(
const ampf<Precision>& op1,
const ampf<Precision>& op2)
1672 mpfr_add(
v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1676 template<
unsigned int Precision>
1677 const ampf<Precision>
operator-(
const ampf<Precision>& op1,
const ampf<Precision>& op2)
1680 mpfr_sub(
v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1685 template<
unsigned int Precision>
1686 const ampf<Precision>
operator*(
const ampf<Precision>& op1,
const ampf<Precision>& op2)
1689 mpfr_mul(
v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1693 template<
unsigned int Precision>
1694 const ampf<Precision>
operator/(
const ampf<Precision>& op1,
const ampf<Precision>& op2)
1697 mpfr_div(
v->value, op1.getReadPtr(), op2.getReadPtr(), GMP_RNDN);
1704 template<
unsigned int Precision>
1705 const ampf<Precision>
sqr(
const ampf<Precision> &
x)
1708 ampf<Precision>
res;
1709 mpfr_sqr(
res.getWritePtr(),
x.getReadPtr(), GMP_RNDN);
1713 template<
unsigned int Precision>
1714 int sign(
const ampf<Precision> &
x)
1716 int s = mpfr_sgn(
x.getReadPtr());
1724 template<
unsigned int Precision>
1725 const ampf<Precision>
abs(
const ampf<Precision> &
x)
1728 ampf<Precision>
res;
1729 mpfr_abs(
res.getWritePtr(),
x.getReadPtr(), GMP_RNDN);
1733 template<
unsigned int Precision>
1734 const ampf<Precision>
maximum(
const ampf<Precision> &
x,
const ampf<Precision> &
y)
1737 ampf<Precision>
res;
1738 mpfr_max(
res.getWritePtr(),
x.getReadPtr(),
y.getReadPtr(), GMP_RNDN);
1742 template<
unsigned int Precision>
1743 const ampf<Precision>
minimum(
const ampf<Precision> &
x,
const ampf<Precision> &
y)
1746 ampf<Precision>
res;
1747 mpfr_min(
res.getWritePtr(),
x.getReadPtr(),
y.getReadPtr(), GMP_RNDN);
1751 template<
unsigned int Precision>
1752 const ampf<Precision>
sqrt(
const ampf<Precision> &
x)
1755 ampf<Precision>
res;
1756 mpfr_sqrt(
res.getWritePtr(),
x.getReadPtr(), GMP_RNDN);
1760 template<
unsigned int Precision>
1761 signed long trunc(
const ampf<Precision> &
x)
1763 ampf<Precision> tmp;
1765 mpfr_trunc(tmp.getWritePtr(),
x.getReadPtr());
1766 if( mpfr_integer_p(tmp.getReadPtr())==0 )
1769 mpfr_clear_erangeflag();
1770 r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1771 if( mpfr_erangeflag_p()!=0 )
1777 template<
unsigned int Precision>
1778 const ampf<Precision>
frac(
const ampf<Precision> &
x)
1782 mpfr_frac(r.getWritePtr(),
x.getReadPtr(), GMP_RNDN);
1786 template<
unsigned int Precision>
1787 signed long floor(
const ampf<Precision> &
x)
1789 ampf<Precision> tmp;
1791 mpfr_floor(tmp.getWritePtr(),
x.getReadPtr());
1792 if( mpfr_integer_p(tmp.getReadPtr())==0 )
1795 mpfr_clear_erangeflag();
1796 r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1797 if( mpfr_erangeflag_p()!=0 )
1803 template<
unsigned int Precision>
1804 signed long ceil(
const ampf<Precision> &
x)
1806 ampf<Precision> tmp;
1808 mpfr_ceil(tmp.getWritePtr(),
x.getReadPtr());
1809 if( mpfr_integer_p(tmp.getReadPtr())==0 )
1812 mpfr_clear_erangeflag();
1813 r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1814 if( mpfr_erangeflag_p()!=0 )
1820 template<
unsigned int Precision>
1821 signed long round(
const ampf<Precision> &
x)
1823 ampf<Precision> tmp;
1825 mpfr_round(tmp.getWritePtr(),
x.getReadPtr());
1826 if( mpfr_integer_p(tmp.getReadPtr())==0 )
1829 mpfr_clear_erangeflag();
1830 r = mpfr_get_si(tmp.getReadPtr(), GMP_RNDN);
1831 if( mpfr_erangeflag_p()!=0 )
1837 template<
unsigned int Precision>
1838 const ampf<Precision>
frexp2(
const ampf<Precision> &
x, mp_exp_t *
exponent)
1842 if( !
x.isFiniteNumber() )
1852 *
exponent = mpfr_get_exp(r.getReadPtr());
1853 mpfr_set_exp(r.getWritePtr(),0);
1857 template<
unsigned int Precision>
1858 const ampf<Precision>
ldexp2(
const ampf<Precision> &
x, mp_exp_t
exponent)
1862 mpfr_mul_2si(r.getWritePtr(),
x.getReadPtr(),
exponent, GMP_RNDN);
1869 #define __AMP_BINARY_OPI(type) \
1870 template<unsigned int Precision> const ampf<Precision> operator+(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)+op2; } \
1871 template<unsigned int Precision> const ampf<Precision> operator+(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)+op2; } \
1872 template<unsigned int Precision> const ampf<Precision> operator+(const ampf<Precision>& op1, const signed type& op2) { return op1+ampf<Precision>(op2); } \
1873 template<unsigned int Precision> const ampf<Precision> operator+(const ampf<Precision>& op1, const unsigned type& op2) { return op1+ampf<Precision>(op2); } \
1874 template<unsigned int Precision> const ampf<Precision> operator-(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)-op2; } \
1875 template<unsigned int Precision> const ampf<Precision> operator-(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)-op2; } \
1876 template<unsigned int Precision> const ampf<Precision> operator-(const ampf<Precision>& op1, const signed type& op2) { return op1-ampf<Precision>(op2); } \
1877 template<unsigned int Precision> const ampf<Precision> operator-(const ampf<Precision>& op1, const unsigned type& op2) { return op1-ampf<Precision>(op2); } \
1878 template<unsigned int Precision> const ampf<Precision> operator*(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)*op2; } \
1879 template<unsigned int Precision> const ampf<Precision> operator*(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)*op2; } \
1880 template<unsigned int Precision> const ampf<Precision> operator*(const ampf<Precision>& op1, const signed type& op2) { return op1*ampf<Precision>(op2); } \
1881 template<unsigned int Precision> const ampf<Precision> operator*(const ampf<Precision>& op1, const unsigned type& op2) { return op1*ampf<Precision>(op2); } \
1882 template<unsigned int Precision> const ampf<Precision> operator/(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)/op2; } \
1883 template<unsigned int Precision> const ampf<Precision> operator/(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)/op2; } \
1884 template<unsigned int Precision> const ampf<Precision> operator/(const ampf<Precision>& op1, const signed type& op2) { return op1/ampf<Precision>(op2); } \
1885 template<unsigned int Precision> const ampf<Precision> operator/(const ampf<Precision>& op1, const unsigned type& op2) { return op1/ampf<Precision>(op2); } \
1886 template<unsigned int Precision> bool operator==(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)==op2; } \
1887 template<unsigned int Precision> bool operator==(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)==op2; } \
1888 template<unsigned int Precision> bool operator==(const ampf<Precision>& op1, const signed type& op2) { return op1==ampf<Precision>(op2); } \
1889 template<unsigned int Precision> bool operator==(const ampf<Precision>& op1, const unsigned type& op2) { return op1==ampf<Precision>(op2); } \
1890 template<unsigned int Precision> bool operator!=(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)!=op2; } \
1891 template<unsigned int Precision> bool operator!=(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)!=op2; } \
1892 template<unsigned int Precision> bool operator!=(const ampf<Precision>& op1, const signed type& op2) { return op1!=ampf<Precision>(op2); } \
1893 template<unsigned int Precision> bool operator!=(const ampf<Precision>& op1, const unsigned type& op2) { return op1!=ampf<Precision>(op2); } \
1894 template<unsigned int Precision> bool operator<=(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<=op2; } \
1895 template<unsigned int Precision> bool operator<=(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<=op2; } \
1896 template<unsigned int Precision> bool operator<=(const ampf<Precision>& op1, const signed type& op2) { return op1<=ampf<Precision>(op2); } \
1897 template<unsigned int Precision> bool operator<=(const ampf<Precision>& op1, const unsigned type& op2) { return op1<=ampf<Precision>(op2); } \
1898 template<unsigned int Precision> bool operator>=(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>=op2; } \
1899 template<unsigned int Precision> bool operator>=(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>=op2; } \
1900 template<unsigned int Precision> bool operator>=(const ampf<Precision>& op1, const signed type& op2) { return op1>=ampf<Precision>(op2); } \
1901 template<unsigned int Precision> bool operator>=(const ampf<Precision>& op1, const unsigned type& op2) { return op1>=ampf<Precision>(op2); } \
1902 template<unsigned int Precision> bool operator<(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<op2; } \
1903 template<unsigned int Precision> bool operator<(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<op2; } \
1904 template<unsigned int Precision> bool operator<(const ampf<Precision>& op1, const signed type& op2) { return op1<ampf<Precision>(op2); } \
1905 template<unsigned int Precision> bool operator<(const ampf<Precision>& op1, const unsigned type& op2) { return op1<ampf<Precision>(op2); } \
1906 template<unsigned int Precision> bool operator>(const signed type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>op2; } \
1907 template<unsigned int Precision> bool operator>(const unsigned type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>op2; } \
1908 template<unsigned int Precision> bool operator>(const ampf<Precision>& op1, const signed type& op2) { return op1>ampf<Precision>(op2); } \
1909 template<unsigned int Precision> bool operator>(const ampf<Precision>& op1, const unsigned type& op2) { return op1>ampf<Precision>(op2); }
1914 #undef __AMP_BINARY_OPI
1915 #define __AMP_BINARY_OPF(type) \
1916 template<unsigned int Precision> const ampf<Precision> operator+(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)+op2; } \
1917 template<unsigned int Precision> const ampf<Precision> operator+(const ampf<Precision>& op1, const type& op2) { return op1+ampf<Precision>(op2); } \
1918 template<unsigned int Precision> const ampf<Precision> operator-(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)-op2; } \
1919 template<unsigned int Precision> const ampf<Precision> operator-(const ampf<Precision>& op1, const type& op2) { return op1-ampf<Precision>(op2); } \
1920 template<unsigned int Precision> const ampf<Precision> operator*(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)*op2; } \
1921 template<unsigned int Precision> const ampf<Precision> operator*(const ampf<Precision>& op1, const type& op2) { return op1*ampf<Precision>(op2); } \
1922 template<unsigned int Precision> const ampf<Precision> operator/(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)/op2; } \
1923 template<unsigned int Precision> const ampf<Precision> operator/(const ampf<Precision>& op1, const type& op2) { return op1/ampf<Precision>(op2); } \
1924 template<unsigned int Precision> bool operator==(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)==op2; } \
1925 template<unsigned int Precision> bool operator==(const ampf<Precision>& op1, const type& op2) { return op1==ampf<Precision>(op2); } \
1926 template<unsigned int Precision> bool operator!=(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)!=op2; } \
1927 template<unsigned int Precision> bool operator!=(const ampf<Precision>& op1, const type& op2) { return op1!=ampf<Precision>(op2); } \
1928 template<unsigned int Precision> bool operator<=(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<=op2; } \
1929 template<unsigned int Precision> bool operator<=(const ampf<Precision>& op1, const type& op2) { return op1<=ampf<Precision>(op2); } \
1930 template<unsigned int Precision> bool operator>=(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>=op2; } \
1931 template<unsigned int Precision> bool operator>=(const ampf<Precision>& op1, const type& op2) { return op1>=ampf<Precision>(op2); } \
1932 template<unsigned int Precision> bool operator<(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)<op2; } \
1933 template<unsigned int Precision> bool operator<(const ampf<Precision>& op1, const type& op2) { return op1<ampf<Precision>(op2); } \
1934 template<unsigned int Precision> bool operator>(const type& op1, const ampf<Precision>& op2) { return ampf<Precision>(op1)>op2; } \
1935 template<unsigned int Precision> bool operator>(const ampf<Precision>& op1, const type& op2) { return op1>ampf<Precision>(op2); }
1939 #undef __AMP_BINARY_OPF
1944 template<
unsigned int Precision>
1945 const ampf<Precision>
pi()
1948 mpfr_const_pi(
v->value, GMP_RNDN);
1952 template<
unsigned int Precision>
1953 const ampf<Precision>
halfpi()
1956 mpfr_const_pi(
v->value, GMP_RNDN);
1957 mpfr_mul_2si(
v->value,
v->value, -1, GMP_RNDN);
1961 template<
unsigned int Precision>
1962 const ampf<Precision>
twopi()
1965 mpfr_const_pi(
v->value, GMP_RNDN);
1966 mpfr_mul_2si(
v->value,
v->value, +1, GMP_RNDN);
1970 template<
unsigned int Precision>
1971 const ampf<Precision>
sin(
const ampf<Precision> &
x)
1974 mpfr_sin(
v->value,
x.getReadPtr(), GMP_RNDN);
1978 template<
unsigned int Precision>
1979 const ampf<Precision>
cos(
const ampf<Precision> &
x)
1982 mpfr_cos(
v->value,
x.getReadPtr(), GMP_RNDN);
1986 template<
unsigned int Precision>
1987 const ampf<Precision>
tan(
const ampf<Precision> &
x)
1990 mpfr_tan(
v->value,
x.getReadPtr(), GMP_RNDN);
1994 template<
unsigned int Precision>
1995 const ampf<Precision>
asin(
const ampf<Precision> &
x)
1998 mpfr_asin(
v->value,
x.getReadPtr(), GMP_RNDN);
2002 template<
unsigned int Precision>
2003 const ampf<Precision>
acos(
const ampf<Precision> &
x)
2006 mpfr_acos(
v->value,
x.getReadPtr(), GMP_RNDN);
2010 template<
unsigned int Precision>
2011 const ampf<Precision>
atan(
const ampf<Precision> &
x)
2014 mpfr_atan(
v->value,
x.getReadPtr(), GMP_RNDN);
2018 template<
unsigned int Precision>
2019 const ampf<Precision>
atan2(
const ampf<Precision> &
y,
const ampf<Precision> &
x)
2022 mpfr_atan2(
v->value,
y.getReadPtr(),
x.getReadPtr(), GMP_RNDN);
2026 template<
unsigned int Precision>
2027 const ampf<Precision>
log(
const ampf<Precision> &
x)
2030 mpfr_log(
v->value,
x.getReadPtr(), GMP_RNDN);
2034 template<
unsigned int Precision>
2035 const ampf<Precision>
log2(
const ampf<Precision> &
x)
2038 mpfr_log2(
v->value,
x.getReadPtr(), GMP_RNDN);
2042 template<
unsigned int Precision>
2043 const ampf<Precision>
log10(
const ampf<Precision> &
x)
2046 mpfr_log10(
v->value,
x.getReadPtr(), GMP_RNDN);
2050 template<
unsigned int Precision>
2051 const ampf<Precision>
exp(
const ampf<Precision> &
x)
2054 mpfr_exp(
v->value,
x.getReadPtr(), GMP_RNDN);
2058 template<
unsigned int Precision>
2059 const ampf<Precision>
sinh(
const ampf<Precision> &
x)
2062 mpfr_sinh(
v->value,
x.getReadPtr(), GMP_RNDN);
2066 template<
unsigned int Precision>
2067 const ampf<Precision>
cosh(
const ampf<Precision> &
x)
2070 mpfr_cosh(
v->value,
x.getReadPtr(), GMP_RNDN);
2074 template<
unsigned int Precision>
2075 const ampf<Precision>
tanh(
const ampf<Precision> &
x)
2078 mpfr_tanh(
v->value,
x.getReadPtr(), GMP_RNDN);
2082 template<
unsigned int Precision>
2083 const ampf<Precision>
pow(
const ampf<Precision> &
x,
const ampf<Precision> &
y)
2086 mpfr_pow(
v->value,
x.getReadPtr(),
y.getReadPtr(), GMP_RNDN);
2093 template<
unsigned int Precision>
2109 campf(
const ampf<Precision> &_x):
x(_x),
y(0){};
2112 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
2113 template<
unsigned int Prec2>
2136 #ifndef _AMP_NO_TEMPLATE_CONSTRUCTORS
2137 template<
unsigned int Precision2>
2146 ampf<Precision>
x,
y;
2152 template<
unsigned int Precision>
2154 {
return lhs.
x==rhs.
x && lhs.
y==rhs.
y; }
2156 template<
unsigned int Precision>
2157 bool operator!=(
const campf<Precision>& lhs,
const campf<Precision>& rhs)
2158 {
return lhs.x!=rhs.x || lhs.y!=rhs.y; }
2160 template<
unsigned int Precision>
2161 const campf<Precision>
operator+(
const campf<Precision>& lhs)
2164 template<
unsigned int Precision>
2165 campf<Precision>&
operator+=(campf<Precision>& lhs,
const campf<Precision>& rhs)
2166 { lhs.x += rhs.x; lhs.y += rhs.y;
return lhs; }
2168 template<
unsigned int Precision>
2169 const campf<Precision>
operator+(
const campf<Precision>& lhs,
const campf<Precision>& rhs)
2170 { campf<Precision> r = lhs; r += rhs;
return r; }
2172 template<
unsigned int Precision>
2173 const campf<Precision>
operator-(
const campf<Precision>& lhs)
2174 {
return campf<Precision>(-lhs.x, -lhs.y); }
2176 template<
unsigned int Precision>
2177 campf<Precision>&
operator-=(campf<Precision>& lhs,
const campf<Precision>& rhs)
2178 { lhs.x -= rhs.x; lhs.y -= rhs.y;
return lhs; }
2180 template<
unsigned int Precision>
2181 const campf<Precision>
operator-(
const campf<Precision>& lhs,
const campf<Precision>& rhs)
2182 { campf<Precision> r = lhs; r -= rhs;
return r; }
2184 template<
unsigned int Precision>
2185 campf<Precision>&
operator*=(campf<Precision>& lhs,
const campf<Precision>& rhs)
2187 ampf<Precision> xx(lhs.x*rhs.x), yy(lhs.y*rhs.y), mm((lhs.x+lhs.y)*(rhs.x+rhs.y));
2193 template<
unsigned int Precision>
2194 const campf<Precision>
operator*(
const campf<Precision>& lhs,
const campf<Precision>& rhs)
2195 { campf<Precision> r = lhs; r *= rhs;
return r; }
2197 template<
unsigned int Precision>
2198 const campf<Precision>
operator/(
const campf<Precision>& lhs,
const campf<Precision>& rhs)
2203 if(
abs(rhs.y)<
abs(rhs.x) )
2215 result.y = (-lhs.x+lhs.y*e)/
f;
2220 template<
unsigned int Precision>
2221 campf<Precision>&
operator/=(campf<Precision>& lhs,
const campf<Precision>& rhs)
2227 template<
unsigned int Precision>
2228 const ampf<Precision>
abscomplex(
const campf<Precision> &z)
2230 ampf<Precision>
w, xabs, yabs,
v;
2234 w = xabs>yabs ? xabs : yabs;
2235 v = xabs<yabs ? xabs : yabs;
2240 ampf<Precision> t =
v/
w;
2245 template<
unsigned int Precision>
2246 const campf<Precision>
conj(
const campf<Precision> &z)
2248 return campf<Precision>(z.x, -z.y);
2251 template<
unsigned int Precision>
2252 const campf<Precision>
csqr(
const campf<Precision> &z)
2254 ampf<Precision> t = z.x*z.y;
2255 return campf<Precision>(
sqr(z.x)-
sqr(z.y), t+t);
2261 #define __AMP_BINARY_OPI(type) \
2262 template<unsigned int Precision> const campf<Precision> operator+ (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(op1+op2.x, op2.y); } \
2263 template<unsigned int Precision> const campf<Precision> operator+ (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(op1+op2.x, op2.y); } \
2264 template<unsigned int Precision> const campf<Precision> operator+ (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op1.x+op2, op1.y); } \
2265 template<unsigned int Precision> const campf<Precision> operator+ (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op1.x+op2, op1.y); } \
2266 template<unsigned int Precision> const campf<Precision> operator- (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(op1-op2.x, -op2.y); } \
2267 template<unsigned int Precision> const campf<Precision> operator- (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(op1-op2.x, -op2.y); } \
2268 template<unsigned int Precision> const campf<Precision> operator- (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op1.x-op2, op1.y); } \
2269 template<unsigned int Precision> const campf<Precision> operator- (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op1.x-op2, op1.y); } \
2270 template<unsigned int Precision> const campf<Precision> operator* (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(op1*op2.x, op1*op2.y); } \
2271 template<unsigned int Precision> const campf<Precision> operator* (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(op1*op2.x, op1*op2.y); } \
2272 template<unsigned int Precision> const campf<Precision> operator* (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op2*op1.x, op2*op1.y); } \
2273 template<unsigned int Precision> const campf<Precision> operator* (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op2*op1.x, op2*op1.y); } \
2274 template<unsigned int Precision> const campf<Precision> operator/ (const signed type& op1, const campf<Precision>& op2) { return campf<Precision>(ampf<Precision>(op1),ampf<Precision>(0))/op2; } \
2275 template<unsigned int Precision> const campf<Precision> operator/ (const unsigned type& op1, const campf<Precision>& op2) { return campf<Precision>(ampf<Precision>(op1),ampf<Precision>(0))/op2; } \
2276 template<unsigned int Precision> const campf<Precision> operator/ (const campf<Precision>& op1, const signed type& op2) { return campf<Precision>(op1.x/op2, op1.y/op2); } \
2277 template<unsigned int Precision> const campf<Precision> operator/ (const campf<Precision>& op1, const unsigned type& op2) { return campf<Precision>(op1.x/op2, op1.y/op2); } \
2278 template<unsigned int Precision> bool operator==(const signed type& op1, const campf<Precision>& op2) { return op1==op2.x && op2.y==0; } \
2279 template<unsigned int Precision> bool operator==(const unsigned type& op1, const campf<Precision>& op2) { return op1==op2.x && op2.y==0; } \
2280 template<unsigned int Precision> bool operator==(const campf<Precision>& op1, const signed type& op2) { return op1.x==op2 && op1.y==0; } \
2281 template<unsigned int Precision> bool operator==(const campf<Precision>& op1, const unsigned type& op2) { return op1.x==op2 && op1.y==0; } \
2282 template<unsigned int Precision> bool operator!=(const campf<Precision>& op1, const signed type& op2) { return op1.x!=op2 || op1.y!=0; } \
2283 template<unsigned int Precision> bool operator!=(const campf<Precision>& op1, const unsigned type& op2) { return op1.x!=op2 || op1.y!=0; } \
2284 template<unsigned int Precision> bool operator!=(const signed type& op1, const campf<Precision>& op2) { return op1!=op2.x || op2.y!=0; } \
2285 template<unsigned int Precision> bool operator!=(const unsigned type& op1, const campf<Precision>& op2) { return op1!=op2.x || op2.y!=0; }
2290 #undef __AMP_BINARY_OPI
2291 #define __AMP_BINARY_OPF(type) \
2292 template<unsigned int Precision> const campf<Precision> operator+ (const type& op1, const campf<Precision>& op2) { return campf<Precision>(op1+op2.x, op2.y); } \
2293 template<unsigned int Precision> const campf<Precision> operator+ (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op1.x+op2, op1.y); } \
2294 template<unsigned int Precision> const campf<Precision> operator- (const type& op1, const campf<Precision>& op2) { return campf<Precision>(op1-op2.x, -op2.y); } \
2295 template<unsigned int Precision> const campf<Precision> operator- (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op1.x-op2, op1.y); } \
2296 template<unsigned int Precision> const campf<Precision> operator* (const type& op1, const campf<Precision>& op2) { return campf<Precision>(op1*op2.x, op1*op2.y); } \
2297 template<unsigned int Precision> const campf<Precision> operator* (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op2*op1.x, op2*op1.y); } \
2298 template<unsigned int Precision> const campf<Precision> operator/ (const type& op1, const campf<Precision>& op2) { return campf<Precision>(ampf<Precision>(op1),ampf<Precision>(0))/op2; } \
2299 template<unsigned int Precision> const campf<Precision> operator/ (const campf<Precision>& op1, const type& op2) { return campf<Precision>(op1.x/op2, op1.y/op2); } \
2300 template<unsigned int Precision> bool operator==(const type& op1, const campf<Precision>& op2) { return op1==op2.x && op2.y==0; } \
2301 template<unsigned int Precision> bool operator==(const campf<Precision>& op1, const type& op2) { return op1.x==op2 && op1.y==0; } \
2302 template<unsigned int Precision> bool operator!=(const type& op1, const campf<Precision>& op2) { return op1!=op2.x || op2.y!=0; } \
2303 template<unsigned int Precision> bool operator!=(const campf<Precision>& op1, const type& op2) { return op1.x!=op2 || op1.y!=0; }
2308 #undef __AMP_BINARY_OPF
2313 template<
unsigned int Precision>
2317 int i, cnt = v1.GetLength();
2318 const ampf<Precision> *p1 = v1.GetData();
2319 const ampf<Precision> *p2 = v2.GetData();
2320 mpfr_record *r =
NULL;
2321 mpfr_record *t =
NULL;
2326 mpfr_set_ui(r->value, 0, GMP_RNDN);
2327 for(
i=0;
i<cnt;
i++)
2329 mpfr_mul(t->value, p1->getReadPtr(), p2->getReadPtr(), GMP_RNDN);
2330 mpfr_add(r->value, r->value, t->value, GMP_RNDN);
2347 template<
unsigned int Precision>
2351 int i, cnt = vDst.GetLength();
2352 ampf<Precision> *pDst = vDst.GetData();
2353 const ampf<Precision> *pSrc = vSrc.GetData();
2356 for(
i=0;
i<cnt;
i++)
2359 pDst += vDst.GetStep();
2360 pSrc += vSrc.GetStep();
2364 template<
unsigned int Precision>
2368 int i, cnt = vDst.GetLength();
2369 ampf<Precision> *pDst = vDst.GetData();
2370 const ampf<Precision> *pSrc = vSrc.GetData();
2371 for(
i=0;
i<cnt;
i++)
2374 mpfr_ptr
v = pDst->getWritePtr();
2375 mpfr_neg(
v,
v, GMP_RNDN);
2376 pDst += vDst.GetStep();
2377 pSrc += vSrc.GetStep();
2381 template<
unsigned int Precision,
class T2>
2385 int i, cnt = vDst.GetLength();
2386 ampf<Precision> *pDst = vDst.GetData();
2387 const ampf<Precision> *pSrc = vSrc.GetData();
2388 ampf<Precision> a(
alpha);
2389 for(
i=0;
i<cnt;
i++)
2392 mpfr_ptr
v = pDst->getWritePtr();
2393 mpfr_mul(
v,
v, a.getReadPtr(), GMP_RNDN);
2394 pDst += vDst.GetStep();
2395 pSrc += vSrc.GetStep();
2399 template<
unsigned int Precision>
2403 int i, cnt = vDst.GetLength();
2404 ampf<Precision> *pDst = vDst.GetData();
2405 const ampf<Precision> *pSrc = vSrc.GetData();
2406 for(
i=0;
i<cnt;
i++)
2408 mpfr_ptr
v = pDst->getWritePtr();
2409 mpfr_srcptr vs = pSrc->getReadPtr();
2410 mpfr_add(
v,
v, vs, GMP_RNDN);
2411 pDst += vDst.GetStep();
2412 pSrc += vSrc.GetStep();
2416 template<
unsigned int Precision,
class T2>
2420 int i, cnt = vDst.GetLength();
2421 ampf<Precision> *pDst = vDst.GetData();
2422 const ampf<Precision> *pSrc = vSrc.GetData();
2423 ampf<Precision> a(
alpha), tmp;
2424 for(
i=0;
i<cnt;
i++)
2426 mpfr_ptr
v = pDst->getWritePtr();
2427 mpfr_srcptr vs = pSrc->getReadPtr();
2428 mpfr_mul(tmp.getWritePtr(), a.getReadPtr(), vs, GMP_RNDN);
2429 mpfr_add(
v,
v, tmp.getWritePtr(), GMP_RNDN);
2430 pDst += vDst.GetStep();
2431 pSrc += vSrc.GetStep();
2435 template<
unsigned int Precision>
2439 int i, cnt = vDst.GetLength();
2440 ampf<Precision> *pDst = vDst.GetData();
2441 const ampf<Precision> *pSrc = vSrc.GetData();
2442 for(
i=0;
i<cnt;
i++)
2444 mpfr_ptr
v = pDst->getWritePtr();
2445 mpfr_srcptr vs = pSrc->getReadPtr();
2446 mpfr_sub(
v,
v, vs, GMP_RNDN);
2447 pDst += vDst.GetStep();
2448 pSrc += vSrc.GetStep();
2452 template<
unsigned int Precision,
class T2>
2458 template<
unsigned int Precision,
class T2>
2461 int i, cnt = vDst.GetLength();
2462 ampf<Precision> *pDst = vDst.GetData();
2463 ampf<Precision> a(
alpha);
2464 for(
i=0;
i<cnt;
i++)
2466 mpfr_ptr
v = pDst->getWritePtr();
2467 mpfr_mul(
v, a.getReadPtr(),
v, GMP_RNDN);
2468 pDst += vDst.GetStep();
2515 template<
unsigned int Precision>
2519 template<
unsigned int Precision>
2528 template<
unsigned int Precision>
2579 template<
unsigned int Precision>
2609 mx = amp::maximum<Precision>(amp::abs<Precision>(
x(
j)), mx);
2616 xnorm = xnorm+amp::sqr<Precision>(
x(
j)/mx);
2618 xnorm = amp::sqrt<Precision>(xnorm)*mx;
2633 mx = amp::maximum<Precision>(amp::abs<Precision>(
alpha), amp::abs<Precision>(xnorm));
2634 beta = -mx*amp::sqrt<Precision>(amp::sqr<Precision>(
alpha/mx)+amp::sqr<Precision>(xnorm/mx));
2674 template<
unsigned int Precision>
2687 if(
tau==0 || n1>n2 || m1>m2 )
2695 for(
i=n1;
i<=n2;
i++)
2699 for(
i=m1;
i<=m2;
i++)
2702 ap::vadd(work.getvector(n1, n2), c.getrow(
i, n1, n2), t);
2708 for(
i=m1;
i<=m2;
i++)
2711 ap::vsub(c.getrow(
i, n1, n2), work.getvector(n1, n2), t);
2744 template<
unsigned int Precision>
2759 if(
tau==0 || n1>n2 || m1>m2 )
2768 for(
i=m1;
i<=m2;
i++)
2777 for(
i=m1;
i<=m2;
i++)
2780 ap::vsub(c.getrow(
i, n1, n2),
v.getvector(1, vm), t);
2827 template<
unsigned int Precision>
2833 template<
unsigned int Precision>
2840 template<
unsigned int Precision>
2850 template<
unsigned int Precision>
2857 template<
unsigned int Precision>
2867 template<
unsigned int Precision>
2874 template<
unsigned int Precision>
2880 template<
unsigned int Precision>
2887 template<
unsigned int Precision>
2897 template<
unsigned int Precision>
2904 template<
unsigned int Precision>
2914 template<
unsigned int Precision>
2973 template<
unsigned int Precision>
3003 tauq.setbounds(0, n-1);
3004 taup.setbounds(0, n-1);
3008 tauq.setbounds(0,
m-1);
3009 taup.setbounds(0,
m-1);
3017 for(
i=0;
i<=n-1;
i++)
3024 reflections::generatereflection<Precision>(t,
m-
i, ltau);
3032 reflections::applyreflectionfromtheleft<Precision>(a, ltau, t,
i,
m-1,
i+1, n-1, work);
3041 reflections::generatereflection<Precision>(t, n-1-
i, ltau);
3049 reflections::applyreflectionfromtheright<Precision>(a, ltau, t,
i+1,
m-1,
i+1, n-1, work);
3063 for(
i=0;
i<=
m-1;
i++)
3070 reflections::generatereflection<Precision>(t, n-
i, ltau);
3078 reflections::applyreflectionfromtheright<Precision>(a, ltau, t,
i+1,
m-1,
i, n-1, work);
3087 reflections::generatereflection<Precision>(t,
m-1-
i, ltau);
3095 reflections::applyreflectionfromtheleft<Precision>(a, ltau, t,
i+1,
m-1,
i+1, n-1, work);
3127 template<
unsigned int Precision>
3141 if(
m==0 || n==0 || qcolumns==0 )
3149 q.setbounds(0,
m-1, 0, qcolumns-1);
3150 for(
i=0;
i<=
m-1;
i++)
3152 for(
j=0;
j<=qcolumns-1;
j++)
3168 rmatrixbdmultiplybyq<Precision>(qp,
m, n, tauq, q,
m, qcolumns,
false,
false);
3201 template<
unsigned int Precision>
3221 if(
m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
3271 reflections::applyreflectionfromtheright<Precision>(z, tauq(
i),
v, 0, zrows-1,
i,
m-1, work);
3275 reflections::applyreflectionfromtheleft<Precision>(z, tauq(
i),
v,
i,
m-1, 0, zcolumns-1, work);
3279 while(
i!=i2+istep );
3319 reflections::applyreflectionfromtheright<Precision>(z, tauq(
i),
v, 0, zrows-1,
i+1,
m-1, work);
3323 reflections::applyreflectionfromtheleft<Precision>(z, tauq(
i),
v,
i+1,
m-1, 0, zcolumns-1, work);
3327 while(
i!=i2+istep );
3354 template<
unsigned int Precision>
3368 if(
m==0 || n==0 || ptrows==0 )
3376 pt.setbounds(0, ptrows-1, 0, n-1);
3377 for(
i=0;
i<=ptrows-1;
i++)
3379 for(
j=0;
j<=n-1;
j++)
3395 rmatrixbdmultiplybyp<Precision>(qp,
m, n, taup, pt, ptrows, n,
true,
true);
3428 template<
unsigned int Precision>
3448 if(
m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
3502 reflections::applyreflectionfromtheright<Precision>(z, taup(
i),
v, 0, zrows-1,
i+1, n-1, work);
3506 reflections::applyreflectionfromtheleft<Precision>(z, taup(
i),
v,
i+1, n-1, 0, zcolumns-1, work);
3510 while(
i!=i2+istep );
3549 reflections::applyreflectionfromtheright<Precision>(z, taup(
i),
v, 0, zrows-1,
i, n-1, work);
3553 reflections::applyreflectionfromtheleft<Precision>(z, taup(
i),
v,
i, n-1, 0, zcolumns-1, work);
3557 while(
i!=i2+istep );
3584 template<
unsigned int Precision>
3602 d.setbounds(0, n-1);
3603 e.setbounds(0, n-1);
3604 for(
i=0;
i<=n-2;
i++)
3609 d(n-1) =
b(n-1,n-1);
3613 d.setbounds(0,
m-1);
3614 e.setbounds(0,
m-1);
3615 for(
i=0;
i<=
m-2;
i++)
3620 d(
m-1) =
b(
m-1,
m-1);
3629 template<
unsigned int Precision>
3653 taup.setbounds(1, minmn);
3654 tauq.setbounds(1, minmn);
3669 reflections::generatereflection<Precision>(t, mmip1, ltau);
3677 reflections::applyreflectionfromtheleft<Precision>(a, ltau, t,
i,
m,
i+1, n, work);
3688 reflections::generatereflection<Precision>(t, nmi, ltau);
3696 reflections::applyreflectionfromtheright<Precision>(a, ltau, t,
i+1,
m,
i+1, n, work);
3718 reflections::generatereflection<Precision>(t, nmip1, ltau);
3726 reflections::applyreflectionfromtheright<Precision>(a, ltau, t,
i+1,
m,
i, n, work);
3737 reflections::generatereflection<Precision>(t, mmi, ltau);
3745 reflections::applyreflectionfromtheleft<Precision>(a, ltau, t,
i+1,
m,
i+1, n, work);
3760 template<
unsigned int Precision>
3777 if(
m==0 || n==0 || qcolumns==0 )
3785 q.setbounds(1,
m, 1, qcolumns);
3794 for(
j=1;
j<=qcolumns;
j++)
3813 reflections::applyreflectionfromtheleft<Precision>(q, tauq(
i),
v,
i,
m, 1, qcolumns, work);
3822 ap::vmove(
v.getvector(1, vm), qp.getcolumn(
i, ip1,
m));
3824 reflections::applyreflectionfromtheleft<Precision>(q, tauq(
i),
v,
i+1,
m, 1, qcolumns, work);
3834 template<
unsigned int Precision>
3856 if(
m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
3907 reflections::applyreflectionfromtheright<Precision>(z, tauq(
i),
v, 1, zrows,
i,
m, work);
3911 reflections::applyreflectionfromtheleft<Precision>(z, tauq(
i),
v,
i,
m, 1, zcolumns, work);
3915 while(
i!=i2+istep );
3953 ap::vmove(
v.getvector(1, vm), qp.getcolumn(
i, ip1,
m));
3957 reflections::applyreflectionfromtheright<Precision>(z, tauq(
i),
v, 1, zrows,
i+1,
m, work);
3961 reflections::applyreflectionfromtheleft<Precision>(z, tauq(
i),
v,
i+1,
m, 1, zcolumns, work);
3965 while(
i!=i2+istep );
3975 template<
unsigned int Precision>
3992 if(
m==0 || n==0 || ptrows==0 )
4000 pt.setbounds(1, ptrows, 1, n);
4007 for(
i=1;
i<=ptrows;
i++)
4027 ap::vmove(
v.getvector(1, vm), qp.getrow(
i, ip1, n));
4029 reflections::applyreflectionfromtheright<Precision>(pt, taup(
i),
v, 1, ptrows,
i+1, n, work);
4039 reflections::applyreflectionfromtheright<Precision>(pt, taup(
i),
v, 1, ptrows,
i, n, work);
4049 template<
unsigned int Precision>
4071 if(
m<=0 || n<=0 || zrows<=0 || zcolumns<=0 )
4123 ap::vmove(
v.getvector(1, vm), qp.getrow(
i, ip1, n));
4127 reflections::applyreflectionfromtheright<Precision>(z, taup(
i),
v, 1, zrows,
i+1, n, work);
4131 reflections::applyreflectionfromtheleft<Precision>(z, taup(
i),
v,
i+1, n, 1, zcolumns, work);
4135 while(
i!=i2+istep );
4175 reflections::applyreflectionfromtheright<Precision>(z, taup(
i),
v, 1, zrows,
i, n, work);
4179 reflections::applyreflectionfromtheleft<Precision>(z, taup(
i),
v,
i, n, 1, zcolumns, work);
4183 while(
i!=i2+istep );
4192 template<
unsigned int Precision>
4212 for(
i=1;
i<=n-1;
i++)
4223 for(
i=1;
i<=
m-1;
i++)
4275 template<
unsigned int Precision>
4280 template<
unsigned int Precision>
4287 template<
unsigned int Precision>
4292 template<
unsigned int Precision>
4297 template<
unsigned int Precision>
4304 template<
unsigned int Precision>
4350 template<
unsigned int Precision>
4371 tau.setbounds(0, minmn-1);
4377 for(
i=0;
i<=
k-1;
i++)
4384 reflections::generatereflection<Precision>(t,
m-
i, tmp);
4394 reflections::applyreflectionfromtheleft<Precision>(a,
tau(
i), t,
i,
m-1,
i+1, n-1, work);
4420 template<
unsigned int Precision>
4437 if(
m<=0 || n<=0 || qcolumns<=0 )
4447 q.setbounds(0,
m-1, 0, qcolumns-1);
4450 for(
i=0;
i<=
m-1;
i++)
4452 for(
j=0;
j<=qcolumns-1;
j++)
4468 for(
i=
k-1;
i>=0;
i--)
4476 reflections::applyreflectionfromtheleft<Precision>(q,
tau(
i),
v,
i,
m-1, 0, qcolumns-1, work);
4496 template<
unsigned int Precision>
4511 r.setbounds(0,
m-1, 0, n-1);
4512 for(
i=0;
i<=n-1;
i++)
4516 for(
i=1;
i<=
m-1;
i++)
4518 ap::vmove(r.getrow(
i, 0, n-1), r.getrow(0, 0, n-1));
4520 for(
i=0;
i<=
k-1;
i++)
4530 template<
unsigned int Precision>
4548 tau.setbounds(1, minmn);
4562 reflections::generatereflection<Precision>(t, mmip1, tmp);
4572 reflections::applyreflectionfromtheleft<Precision>(a,
tau(
i), t,
i,
m,
i+1, n, work);
4581 template<
unsigned int Precision>
4599 if(
m==0 || n==0 || qcolumns==0 )
4609 q.setbounds(1,
m, 1, qcolumns);
4614 for(
j=1;
j<=qcolumns;
j++)
4639 reflections::applyreflectionfromtheleft<Precision>(q,
tau(
i),
v,
i,
m, 1, qcolumns, work);
4647 template<
unsigned int Precision>
4668 q.setbounds(1,
m, 1,
m);
4669 r.setbounds(1,
m, 1, n);
4674 qrdecomposition<Precision>(a,
m, n,
tau);
4685 ap::vmove(r.getrow(
i, 1, n), r.getrow(1, 1, n));
4695 unpackqfromqr<Precision>(a,
m, n,
tau,
m, q);
4735 template<
unsigned int Precision>
4740 template<
unsigned int Precision>
4747 template<
unsigned int Precision>
4752 template<
unsigned int Precision>
4757 template<
unsigned int Precision>
4764 template<
unsigned int Precision>
4806 template<
unsigned int Precision>
4825 tau.setbounds(0, minmn-1);
4827 for(
i=0;
i<=
k-1;
i++)
4834 reflections::generatereflection<Precision>(t, n-
i, tmp);
4844 reflections::applyreflectionfromtheright<Precision>(a,
tau(
i), t,
i+1,
m-1,
i, n-1, work);
4870 template<
unsigned int Precision>
4887 if(
m<=0 || n<=0 || qrows<=0 )
4897 q.setbounds(0, qrows-1, 0, n-1);
4900 for(
i=0;
i<=qrows-1;
i++)
4902 for(
j=0;
j<=n-1;
j++)
4918 for(
i=
k-1;
i>=0;
i--)
4926 reflections::applyreflectionfromtheright<Precision>(q,
tau(
i),
v, 0, qrows-1,
i, n-1, work);
4946 template<
unsigned int Precision>
4960 l.setbounds(0,
m-1, 0, n-1);
4961 for(
i=0;
i<=n-1;
i++)
4965 for(
i=1;
i<=
m-1;
i++)
4969 for(
i=0;
i<=
m-1;
i++)
4981 template<
unsigned int Precision>
5001 tau.setbounds(1, minmn);
5015 reflections::generatereflection<Precision>(t, nmip1, tmp);
5025 reflections::applyreflectionfromtheright<Precision>(a,
tau(
i), t,
i+1,
m,
i, n, work);
5035 template<
unsigned int Precision>
5053 if(
m==0 || n==0 || qrows==0 )
5063 q.setbounds(1, qrows, 1, n);
5066 for(
i=1;
i<=qrows;
i++)
5093 reflections::applyreflectionfromtheright<Precision>(q,
tau(
i),
v, 1, qrows,
i, n, work);
5101 template<
unsigned int Precision>
5117 q.setbounds(1, n, 1, n);
5118 l.setbounds(1,
m, 1, n);
5123 lqdecomposition<Precision>(a,
m, n,
tau);
5146 unpackqfromlq<Precision>(a,
m, n,
tau, n, q);
5186 template<
unsigned int Precision>
5190 template<
unsigned int Precision>
5194 template<
unsigned int Precision>
5199 template<
unsigned int Precision>
5204 template<
unsigned int Precision>
5211 template<
unsigned int Precision>
5222 template<
unsigned int Precision>
5229 template<
unsigned int Precision>
5240 template<
unsigned int Precision>
5255 template<
unsigned int Precision>
5258 template<
unsigned int Precision>
5281 template<
unsigned int Precision>
5302 result = amp::abs<Precision>(
x(i1));
5307 for(ix=i1; ix<=i2; ix++)
5311 absxi = amp::abs<Precision>(
x(ix));
5314 ssq = 1+ssq*amp::sqr<Precision>(scl/absxi);
5319 ssq = ssq+amp::sqr<Precision>(absxi/scl);
5323 result = scl*amp::sqrt<Precision>(ssq);
5328 template<
unsigned int Precision>
5339 a = amp::abs<Precision>(
x(
result));
5340 for(
i=i1+1;
i<=i2;
i++)
5342 if( amp::abs<Precision>(
x(
i))>amp::abs<Precision>(
x(
result)) )
5351 template<
unsigned int Precision>
5363 a = amp::abs<Precision>(
x(
result,
j));
5364 for(
i=i1+1;
i<=i2;
i++)
5366 if( amp::abs<Precision>(
x(
i,
j))>amp::abs<Precision>(
x(
result,
j)) )
5375 template<
unsigned int Precision>
5387 a = amp::abs<Precision>(
x(
i,
result));
5388 for(
j=j1+1;
j<=j2;
j++)
5390 if( amp::abs<Precision>(
x(
i,
j))>amp::abs<Precision>(
x(
i,
result)) )
5399 template<
unsigned int Precision>
5413 for(
j=j1;
j<=j2;
j++)
5417 for(
i=i1;
i<=i2;
i++)
5421 work(
j) = work(
j)+amp::abs<Precision>(a(
i,
j));
5425 for(
j=j1;
j<=j2;
j++)
5433 template<
unsigned int Precision>
5449 if( is1>is2 || js1>js2 )
5455 for(isrc=is1; isrc<=is2; isrc++)
5457 idst = isrc-is1+id1;
5458 ap::vmove(
b.getrow(idst, jd1, jd2), a.getrow(isrc, js1, js2));
5463 template<
unsigned int Precision>
5478 if( i1>i2 || j1>j2 )
5483 for(
i=i1;
i<=i2-1;
i++)
5490 ap::vmove(a.getcolumn(
j, ips, i2), a.getrow(
i, jps, j2));
5496 template<
unsigned int Precision>
5512 if( is1>is2 || js1>js2 )
5518 for(isrc=is1; isrc<=is2; isrc++)
5520 jdst = isrc-is1+jd1;
5521 ap::vmove(
b.getcolumn(jdst, id1, id2), a.getrow(isrc, js1, js2));
5526 template<
unsigned int Precision>
5552 if( i1>i2 || j1>j2 )
5564 for(
i=iy1;
i<=iy2;
i++)
5577 for(
i=i1;
i<=i2;
i++)
5589 if( i1>i2 || j1>j2 )
5601 for(
i=iy1;
i<=iy2;
i++)
5614 for(
i=i1;
i<=i2;
i++)
5617 ap::vadd(
y.getvector(iy1, iy2), a.getrow(
i, j1, j2),
v);
5623 template<
unsigned int Precision>
5634 xabs = amp::abs<Precision>(
x);
5635 yabs = amp::abs<Precision>(
y);
5636 w = amp::maximum<Precision>(xabs, yabs);
5637 z = amp::minimum<Precision>(xabs, yabs);
5644 result =
w*amp::sqrt<Precision>(1+amp::sqr<Precision>(z/
w));
5650 template<
unsigned int Precision>
5711 if( arows<=0 || acols<=0 || brows<=0 || bcols<=0 )
5732 for(
i=ci1;
i<=ci2;
i++)
5734 for(
j=cj1;
j<=cj2;
j++)
5742 for(
i=ci1;
i<=ci2;
i++)
5751 if( !transa && !transb )
5753 for(
l=ai1;
l<=ai2;
l++)
5755 for(r=bi1; r<=bi2; r++)
5759 ap::vadd(c.getrow(
k, cj1, cj2),
b.getrow(r, bj1, bj2),
v);
5768 if( !transa && transb )
5770 if( arows*acols<brows*bcols )
5772 for(r=bi1; r<=bi2; r++)
5774 for(
l=ai1;
l<=ai2;
l++)
5777 c(ci1+
l-ai1,cj1+r-bi1) = c(ci1+
l-ai1,cj1+r-bi1)+
alpha*
v;
5784 for(
l=ai1;
l<=ai2;
l++)
5786 for(r=bi1; r<=bi2; r++)
5789 c(ci1+
l-ai1,cj1+r-bi1) = c(ci1+
l-ai1,cj1+r-bi1)+
alpha*
v;
5799 if( transa && !transb )
5801 for(
l=aj1;
l<=aj2;
l++)
5803 for(r=bi1; r<=bi2; r++)
5807 ap::vadd(c.getrow(
k, cj1, cj2),
b.getrow(r, bj1, bj2),
v);
5816 if( transa && transb )
5818 if( arows*acols<brows*bcols )
5820 for(r=bi1; r<=bi2; r++)
5822 for(
i=1;
i<=crows;
i++)
5826 for(
l=ai1;
l<=ai2;
l++)
5838 for(
l=aj1;
l<=aj2;
l++)
5842 for(r=bi1; r<=bi2; r++)
5845 c(ci1+
l-aj1,cj1+r-bi1) = c(ci1+
l-aj1,cj1+r-bi1)+
alpha*
v;
5896 template<unsigned int Precision>
5906 template<unsigned int Precision>
5916 template<unsigned int Precision>
5949 template<unsigned int Precision>
5967 if( m1>m2 || n1>n2 )
5983 for(j=m1; j<=m2-1; j++)
5987 if( ctemp!=1 || stemp!=0 )
5993 ap::vadd(a.getrow(j, n1, n2), a.getrow(jp1, n1, n2), stemp);
6004 for(j=m1; j<=m2-1; j++)
6008 if( ctemp!=1 || stemp!=0 )
6011 a(j+1,n1) = ctemp*temp-stemp*a(j,n1);
6012 a(j,n1) = stemp*temp+ctemp*a(j,n1);
6025 for(j=m2-1; j>=m1; j--)
6029 if( ctemp!=1 || stemp!=0 )
6035 ap::vadd(a.getrow(j, n1, n2), a.getrow(jp1, n1, n2), stemp);
6046 for(j=m2-1; j>=m1; j--)
6050 if( ctemp!=1 || stemp!=0 )
6053 a(j+1,n1) = ctemp*temp-stemp*a(j,n1);
6054 a(j,n1) = stemp*temp+ctemp*a(j,n1);
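// Illustrative sketch (assumed): applyrotationsfromtheleft above applies a sequence of
// plane rotations to consecutive row pairs; rotation k with coefficients (c[k], s[k])
// updates rows k and k+1 as row_{k+1}' = c*row_{k+1} - s*row_k and
// row_k' = s*row_{k+1} + c*row_k, matching the a(j+1,n1)/a(j,n1) fragments, and the
// identity rotation (c==1, s==0) is skipped. A whole-matrix double version
// (hypothetical name; `forward` plays the role of the library's direction flag):
#include <vector>
void applyrotationsfromtheleft_sketch(bool forward,
                                      const std::vector<double>& c,
                                      const std::vector<double>& s,
                                      std::vector<std::vector<double>>& a)
{
    int nrot  = static_cast<int>(c.size());            // rotations between rows k, k+1
    int ncols = a.empty() ? 0 : static_cast<int>(a[0].size());
    int k     = forward ? 0 : nrot-1;
    int stop  = forward ? nrot : -1;
    int step  = forward ? 1 : -1;
    for( ; k != stop; k += step)
    {
        if( c[k] == 1.0 && s[k] == 0.0 )
            continue;                                  // identity rotation
        for(int col = 0; col < ncols; col++)
        {
            double temp = a[k+1][col];
            a[k+1][col] = c[k]*temp - s[k]*a[k][col];
            a[k][col]   = s[k]*temp + c[k]*a[k][col];
        }
    }
}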
6087 template<unsigned int Precision>
6117 for(j=n1; j<=n2-1; j++)
6121 if( ctemp!=1 || stemp!=0 )
6126 ap::vmul(a.getcolumn(j, m1, m2), ctemp);
6127 ap::vadd(a.getcolumn(j, m1, m2), a.getcolumn(jp1, m1, m2), stemp);
6138 for(j=n1; j<=n2-1; j++)
6142 if( ctemp!=1 || stemp!=0 )
6145 a(m1,j+1) = ctemp*temp-stemp*a(m1,j);
6146 a(m1,j) = stemp*temp+ctemp*a(m1,j);
6159 for(j=n2-1; j>=n1; j--)
6163 if( ctemp!=1 || stemp!=0 )
6168 ap::vmul(a.getcolumn(j, m1, m2), ctemp);
6169 ap::vadd(a.getcolumn(j, m1, m2), a.getcolumn(jp1, m1, m2), stemp);
6180 for(j=n2-1; j>=n1; j--)
6184 if( ctemp!=1 || stemp!=0 )
6187 a(m1,j+1) = ctemp*temp-stemp*a(m1,j);
6188 a(m1,j) = stemp*temp+ctemp*a(m1,j);
6204 template<unsigned int Precision>
6233 r = amp::sqrt<Precision>(amp::sqr<Precision>(f1)+amp::sqr<Precision>(g1));
6236 if( amp::abs<Precision>(f)>amp::abs<Precision>(g) && cs<0 )
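// Illustrative sketch (assumed): generaterotation above builds a Givens pair (cs, sn)
// with cs*f + sn*g = r and -sn*f + cs*g = 0; the final sign flip keeps cs positive
// when |f| > |g|, as in the fragment. The f1/g1 pre-scaling that guards against
// overflow in sqrt(f^2+g^2) is omitted here (hypothetical name):
#include <cmath>
void generaterotation_sketch(double f, double g, double& cs, double& sn, double& r)
{
    if( g == 0.0 )
    {
        cs = 1.0; sn = 0.0; r = f;                     // nothing to rotate
        return;
    }
    if( f == 0.0 )
    {
        cs = 0.0; sn = 1.0; r = g;
        return;
    }
    r  = std::sqrt(f*f + g*g);
    cs = f/r;
    sn = g/r;
    if( std::fabs(f) > std::fabs(g) && cs < 0.0 )
    {
        cs = -cs; sn = -sn; r = -r;                    // keep cs >= 0 for dominant f
    }
}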
6289 template<unsigned int Precision>
6294 bool isfractionalaccuracyrequired,
6301 template<unsigned int Precision>
6306 bool isfractionalaccuracyrequired,
6313 template<unsigned int Precision>
6318 bool isfractionalaccuracyrequired,
6328 template<unsigned int Precision>
6331 template<unsigned int Precision>
6337 template<unsigned int Precision>
6427 template<unsigned int Precision>
6432 bool isfractionalaccuracyrequired,
6452 result = bidiagonalsvddecompositioninternal<Precision>(d1, e1, n, isupper, isfractionalaccuracyrequired, u, 0, nru, c, 0, ncc, vt, 0, ncvt);
6470 template<unsigned int Precision>
6475 bool isfractionalaccuracyrequired,
6486 result = bidiagonalsvddecompositioninternal<Precision>(d, e, n, isupper, isfractionalaccuracyrequired, u, 1, nru, c, 1, ncc, vt, 1, ncvt);
6494 template<unsigned int Precision>
6499 bool isfractionalaccuracyrequired,
6556 bool matrixsplitflag;
6584 ap::vmul(vt.getrow(vstart, vstart, vstart+ncvt-1), -1);
6610 for(i=1; i<=n-1; i++)
6615 for(i=1; i<=n-1; i++)
6634 for(i=1; i<=n-1; i++)
6636 rotations::generaterotation<Precision>(d(i), e(i), cs, sn, r);
6649 rotations::applyrotationsfromtheright<Precision>(fwddir, ustart, uend, 1+ustart-1, n+ustart-1, work0, work1, u, utemp);
6653 rotations::applyrotationsfromtheleft<Precision>(fwddir, 1+cstart-1, n+cstart-1, cstart, cend, work0, work1, c, ctemp);
6662 tolmul = amp::maximum<Precision>(10, amp::minimum<Precision>(100, amp::pow<Precision>(eps, -amp::ampf<Precision>("0.125"))));
6664 if( !isfractionalaccuracyrequired )
6675 smax = amp::maximum<Precision>(smax, amp::abs<Precision>(d(i)));
6677 for(i=1; i<=n-1; i++)
6679 smax = amp::maximum<Precision>(smax, amp::abs<Precision>(e(i)));
6688 sminoa = amp::abs<Precision>(d(1));
6694 mu = amp::abs<Precision>(d(i))*(mu/(mu+amp::abs<Precision>(e(i-1))));
6695 sminoa = amp::minimum<Precision>(sminoa, mu);
6702 sminoa = sminoa/amp::sqrt<Precision>(n);
6703 thresh = amp::maximum<Precision>(tol*sminoa, maxitr*n*n*unfl);
6711 thresh = amp::maximum<Precision>(amp::abs<Precision>(tol)*smax, maxitr*n*n*unfl);
6751 if( tol<0 && amp::abs<Precision>(d(m))<=thresh )
6755 smax = amp::abs<Precision>(d(m));
6757 matrixsplitflag = false;
6758 for(lll=1; lll<=m-1; lll++)
6761 abss = amp::abs<Precision>(d(ll));
6762 abse = amp::abs<Precision>(e(ll));
6763 if( tol<0 && abss<=thresh )
6769 matrixsplitflag = true;
6772 smin = amp::minimum<Precision>(smin, abss);
6773 smax = amp::maximum<Precision>(smax, amp::maximum<Precision>(abss, abse));
6775 if( !matrixsplitflag )
6807 svdv2x2<Precision>(d(m-1), e(m-1), d(m), sigmn, sigmx, sinr, cosr, sinl, cosl);
6818 mm1 = m-1+(vstart-1);
6821 ap::vmul(vt.getrow(mm0, vstart, vend), cosr);
6822 ap::vsub(vt.getrow(mm0, vstart, vend), vt.getrow(mm1, vstart, vend), sinr);
6831 ap::vmul(u.getcolumn(mm0, ustart, uend), cosl);
6832 ap::vsub(u.getcolumn(mm0, ustart, uend), u.getcolumn(mm1, ustart, uend), sinl);
6841 ap::vmul(c.getrow(mm0, cstart, cend), cosl);
6842 ap::vsub(c.getrow(mm0, cstart, cend), c.getrow(mm1, cstart, cend), sinl);
6859 if( idir==1 && amp::abs<Precision>(d(ll))<amp::ampf<Precision>("1.0E-3")*amp::abs<Precision>(d(m)) )
6863 if( idir==2 && amp::abs<Precision>(d(m))<amp::ampf<Precision>("1.0E-3")*amp::abs<Precision>(d(ll)) )
6867 if( ll!=oldll || m!=oldm || bchangedir )
6869 if( amp::abs<Precision>(d(ll))>=amp::abs<Precision>(d(m)) )
6897 if( amp::abs<Precision>(e(m-1))<=amp::abs<Precision>(tol)*amp::abs<Precision>(d(m)) || tol<0 && amp::abs<Precision>(e(m-1))<=thresh )
6909 mu = amp::abs<Precision>(d(ll));
6912 for(lll=ll; lll<=m-1; lll++)
6914 if( amp::abs<Precision>(e(lll))<=tol*mu )
6921 mu = amp::abs<Precision>(d(lll+1))*(mu/(mu+amp::abs<Precision>(e(lll))));
6922 sminl = amp::minimum<Precision>(sminl, mu);
6937 if( amp::abs<Precision>(e(ll))<=amp::abs<Precision>(tol)*amp::abs<Precision>(d(ll)) || tol<0 && amp::abs<Precision>(e(ll))<=thresh )
6949 mu = amp::abs<Precision>(d(m));
6952 for(lll=m-1; lll>=ll; lll--)
6954 if( amp::abs<Precision>(e(lll))<=tol*mu )
6961 mu = amp::abs<Precision>(d(lll))*(mu/(mu+amp::abs<Precision>(e(lll))));
6962 sminl = amp::minimum<Precision>(sminl, mu);
6977 if( tol>=0 && n*tol*(sminl/smax)<=amp::maximum<Precision>(eps, amp::ampf<Precision>("0.01")*tol) )
6993 sll = amp::abs<Precision>(d(ll));
6994 svd2x2<Precision>(d(m-1), e(m-1), d(m), shift, r);
6998 sll = amp::abs<Precision>(d(m));
6999 svd2x2<Precision>(d(ll), e(ll), d(ll+1), shift, r);
7007 if( amp::sqr<Precision>(shift/sll)<eps )
7033 for(i=ll; i<=m-1; i++)
7035 rotations::generaterotation<Precision>(d(i)*cs, e(i), cs, sn, r);
7040 rotations::generaterotation<Precision>(oldcs*r, d(i+1)*sn, oldcs, oldsn, tmp);
7044 work2(i-ll+1) = oldcs;
7045 work3(i-ll+1) = oldsn;
7056 rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work0, work1, vt, vttemp);
7060 rotations::applyrotationsfromtheright<Precision>(fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work2, work3, u, utemp);
7064 rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work2, work3, c, ctemp);
7070 if( amp::abs<Precision>(e(m-1))<=thresh )
7084 for(i=m; i>=ll+1; i--)
7086 rotations::generaterotation<Precision>(d(i)*cs, e(i-1), cs, sn, r);
7091 rotations::generaterotation<Precision>(oldcs*r, d(i-1)*sn, oldcs, oldsn, tmp);
7095 work2(i-ll) = oldcs;
7096 work3(i-ll) = -oldsn;
7107 rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work2, work3, vt, vttemp);
7111 rotations::applyrotationsfromtheright<Precision>(!fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work0, work1, u, utemp);
7115 rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work0, work1, c, ctemp);
7121 if( amp::abs<Precision>(e(ll))<=thresh )
7140 f = (amp::abs<Precision>(d(ll))-shift)*(extsignbdsqr<Precision>(1, d(ll))+shift/d(ll));
7142 for(i=ll; i<=m-1; i++)
7144 rotations::generaterotation<Precision>(f, g, cosr, sinr, r);
7149 f = cosr*d(i)+sinr*e(i);
7150 e(i) = cosr*e(i)-sinr*d(i);
7152 d(i+1) = cosr*d(i+1);
7153 rotations::generaterotation<Precision>(f, g, cosl, sinl, r);
7155 f = cosl*e(i)+sinl*d(i+1);
7156 d(i+1) = cosl*d(i+1)-sinl*e(i);
7160 e(i+1) = cosl*e(i+1);
7162 work0(i-ll+1) = cosr;
7163 work1(i-ll+1) = sinr;
7164 work2(i-ll+1) = cosl;
7165 work3(i-ll+1) = sinl;
7174 rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work0, work1, vt, vttemp);
7178 rotations::applyrotationsfromtheright<Precision>(fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work2, work3, u, utemp);
7182 rotations::applyrotationsfromtheleft<Precision>(fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work2, work3, c, ctemp);
7188 if( amp::abs<Precision>(e(m-1))<=thresh )
7200 f = (amp::abs<Precision>(d(m))-shift)*(extsignbdsqr<Precision>(1, d(m))+shift/d(m));
7202 for(i=m; i>=ll+1; i--)
7204 rotations::generaterotation<Precision>(f, g, cosr, sinr, r);
7209 f = cosr*d(i)+sinr*e(i-1);
7210 e(i-1) = cosr*e(i-1)-sinr*d(i);
7212 d(i-1) = cosr*d(i-1);
7213 rotations::generaterotation<Precision>(f, g, cosl, sinl, r);
7215 f = cosl*e(i-1)+sinl*d(i-1);
7216 d(i-1) = cosl*d(i-1)-sinl*e(i-1);
7220 e(i-2) = cosl*e(i-2);
7223 work1(i-ll) = -sinr;
7225 work3(i-ll) = -sinl;
7232 if( amp::abs<Precision>(e(ll))<=thresh )
7242 rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+vstart-1, m+vstart-1, vstart, vend, work2, work3, vt, vttemp);
7246 rotations::applyrotationsfromtheright<Precision>(!fwddir, ustart, uend, ll+ustart-1, m+ustart-1, work0, work1, u, utemp);
7250 rotations::applyrotationsfromtheleft<Precision>(!fwddir, ll+cstart-1, m+cstart-1, cstart, cend, work0, work1, c, ctemp);
7275 ap::vmul(vt.getrow(i+vstart-1, vstart, vend), -1);
7284 for(i=1; i<=n-1; i++)
7292 for(j=2; j<=n+1-i; j++)
7312 ap::vmove(vt.getrow(isub+vstart-1, vstart, vend), vt.getrow(j+vstart-1, vstart, vend));
7319 ap::vmove(u.getcolumn(isub+ustart-1, ustart, uend), u.getcolumn(j+ustart-1, ustart, uend));
7326 ap::vmove(c.getrow(isub+cstart-1, cstart, cend), c.getrow(j+cstart-1, cstart, cend));
7335 template<unsigned int Precision>
7344 result = amp::abs<Precision>(a);
7348 result = -amp::abs<Precision>(a);
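// Illustrative sketch: extsignbdsqr above is the Fortran-style SIGN(a, b) used by the
// shift computations: the magnitude of a carrying the sign of b (non-negative b gives
// +|a|, negative b gives -|a|). A one-line double version (hypothetical name):
#include <cmath>
double extsignbdsqr_sketch(double a, double b)
{
    return b >= 0.0 ? std::fabs(a) : -std::fabs(a);
}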
7354 template<unsigned int Precision>
7372 fa = amp::abs<Precision>(f);
7373 ga = amp::abs<Precision>(g);
7374 ha = amp::abs<Precision>(h);
7375 fhmn = amp::minimum<Precision>(fa, ha);
7376 fhmx = amp::maximum<Precision>(fa, ha);
7386 ssmax = amp::maximum<Precision>(fhmx, ga)*amp::sqrt<Precision>(1+amp::sqr<Precision>(amp::minimum<Precision>(fhmx, ga)/amp::maximum<Precision>(fhmx, ga)));
7394 at = (fhmx-fhmn)/fhmx;
7395 au = amp::sqr<Precision>(ga/fhmx);
7396 c = 2/(amp::sqrt<Precision>(aas*aas+au)+amp::sqrt<Precision>(at*at+au));
7411 ssmin = fhmn*fhmx/ga;
7417 at = (fhmx-fhmn)/fhmx;
7418 c = 1/(amp::sqrt<Precision>(1+amp::sqr<Precision>(aas*au))+amp::sqrt<Precision>(1+amp::sqr<Precision>(at*au)));
7420 ssmin = ssmin+ssmin;
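// Illustrative sketch (assumed): svd2x2 above returns the singular values of the 2x2
// upper-triangular matrix [[f, g], [0, h]] using scaled min/max ratios to avoid
// overflow and cancellation. The values themselves are the roots of
// t^2 - (f^2+g^2+h^2)*t + (f*h)^2 in the squared variable, which gives this much
// simpler but numerically naive version (hypothetical name):
#include <algorithm>
#include <cmath>
void svd2x2_sketch(double f, double g, double h, double& ssmin, double& ssmax)
{
    double t = f*f + g*g + h*h;                        // trace of B^T*B
    double p = std::fabs(f*h);                         // |det B| = ssmin*ssmax
    double disc = std::sqrt(std::max(t*t - 4.0*p*p, 0.0));
    ssmax = std::sqrt((t + disc)/2.0);
    ssmin = ssmax > 0.0 ? p/ssmax : 0.0;               // recover ssmin from the product
}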
7428 template<unsigned int Precision>
7467 fa = amp::abs<Precision>(ft);
7469 ha = amp::abs<Precision>(h);
7494 ga = amp::abs<Precision>(gt);
7557 s = amp::sqrt<Precision>(tt+mm);
7560 r = amp::abs<Precision>(m);
7564 r = amp::sqrt<Precision>(l*l+mm);
7577 t = extsignbdsqr<Precision>(2, ft)*extsignbdsqr<Precision>(1, gt);
7581 t = gt/extsignbdsqr<Precision>(d, ft)+m/t;
7586 t = (m/(s+t)+m/(r+l))*(1+a);
7588 l = amp::sqrt<Precision>(t*t+4);
7591 clt = (crt+srt*m)/a;
7616 tsign = extsignbdsqr<Precision>(1, csr)*extsignbdsqr<Precision>(1, csl)*extsignbdsqr<Precision>(1, f);
7620 tsign = extsignbdsqr<Precision>(1, snr)*extsignbdsqr<Precision>(1, csl)*extsignbdsqr<Precision>(1, g);
7624 tsign = extsignbdsqr<Precision>(1, snr)*extsignbdsqr<Precision>(1, snl)*extsignbdsqr<Precision>(1, h);
7626 ssmax = extsignbdsqr<Precision>(ssmax, tsign);
7627 ssmin = extsignbdsqr<Precision>(ssmin, tsign*extsignbdsqr<Precision>(1, f)*extsignbdsqr<Precision>(1, h));
7685 template<unsigned int Precision>
7691 int additionalmemory,
7695 template<unsigned int Precision>
7701 int additionalmemory,
7757 template<unsigned int Precision>
7763 int additionalmemory,
7800 w.setbounds(1, minmn);
7807 u.setbounds(0, nru-1, 0, ncu-1);
7813 u.setbounds(0, nru-1, 0, ncu-1);
7821 vt.setbounds(0, nrvt-1, 0, ncvt-1);
7827 vt.setbounds(0, nrvt-1, 0, ncvt-1);
7842 qr::rmatrixqr<Precision>(a, m, n, tau);
7843 for(i=0; i<=n-1; i++)
7845 for(j=0; j<=i-1; j++)
7850 bidiagonal::rmatrixbd<Precision>(a, n, n, tauq, taup);
7851 bidiagonal::rmatrixbdunpackpt<Precision>(a, n, n, taup, nrvt, vt);
7852 bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, n, n, isupper, w, e);
7853 result = bdsvd::rmatrixbdsvd<Precision>(w, e, n, isupper, false, u, 0, a, 0, vt, ncvt);
7862 qr::rmatrixqr<Precision>(a, m, n, tau);
7863 qr::rmatrixqrunpackq<Precision>(a, m, n, tau, ncu, u);
7864 for(i=0; i<=n-1; i++)
7866 for(j=0; j<=i-1; j++)
7871 bidiagonal::rmatrixbd<Precision>(a, n, n, tauq, taup);
7872 bidiagonal::rmatrixbdunpackpt<Precision>(a, n, n, taup, nrvt, vt);
7873 bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, n, n, isupper, w, e);
7874 if( additionalmemory<1 )
7880 bidiagonal::rmatrixbdmultiplybyq<Precision>(a, n, n, tauq, u, m, n, true, false);
7881 result = bdsvd::rmatrixbdsvd<Precision>(w, e, n, isupper, false, u, m, a, 0, vt, ncvt);
7890 bidiagonal::rmatrixbdunpackq<Precision>(a, n, n, tauq, n, t2);
7891 blas::copymatrix<Precision>(u, 0, m-1, 0, n-1, a, 0, m-1, 0, n-1);
7892 blas::inplacetranspose<Precision>(t2, 0, n-1, 0, n-1, work);
7893 result = bdsvd::rmatrixbdsvd<Precision>(w, e, n, isupper, false, u, 0, t2, n, vt, ncvt);
7894 blas::matrixmatrixmultiply<Precision>(a, 0, m-1, 0, n-1, false, t2, 0, n-1, 0, n-1, true, amp::ampf<Precision>("1.0"), u, 0, m-1, 0, n-1, amp::ampf<Precision>("0.0"), work);
7912 lq::rmatrixlq<Precision>(a, m, n, tau);
7913 for(i=0; i<=m-1; i++)
7915 for(j=i+1; j<=m-1; j++)
7920 bidiagonal::rmatrixbd<Precision>(a, m, m, tauq, taup);
7921 bidiagonal::rmatrixbdunpackq<Precision>(a, m, m, tauq, ncu, u);
7922 bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, m, isupper, w, e);
7924 blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7925 result = bdsvd::rmatrixbdsvd<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, 0);
7926 blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7935 lq::rmatrixlq<Precision>(a, m, n, tau);
7936 lq::rmatrixlqunpackq<Precision>(a, m, n, tau, nrvt, vt);
7937 for(i=0; i<=m-1; i++)
7939 for(j=i+1; j<=m-1; j++)
7944 bidiagonal::rmatrixbd<Precision>(a, m, m, tauq, taup);
7945 bidiagonal::rmatrixbdunpackq<Precision>(a, m, m, tauq, ncu, u);
7946 bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, m, isupper, w, e);
7948 blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7949 if( additionalmemory<1 )
7955 bidiagonal::rmatrixbdmultiplybyp<Precision>(a, m, m, taup, vt, m, n, false, true);
7956 result = bdsvd::rmatrixbdsvd<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, n);
7964 bidiagonal::rmatrixbdunpackpt<Precision>(a, m, m, taup, m, t2);
7965 result = bdsvd::rmatrixbdsvd<Precision>(w, e, m, isupper, false, a, 0, u, nru, t2, m);
7966 blas::copymatrix<Precision>(vt, 0, m-1, 0, n-1, a, 0, m-1, 0, n-1);
7967 blas::matrixmatrixmultiply<Precision>(t2, 0, m-1, 0, m-1, false, a, 0, m-1, 0, n-1, false, amp::ampf<Precision>("1.0"), vt, 0, m-1, 0, n-1, amp::ampf<Precision>("0.0"), work);
7969 blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7980 bidiagonal::rmatrixbd<Precision>(a, m, n, tauq, taup);
7981 bidiagonal::rmatrixbdunpackq<Precision>(a, m, n, tauq, ncu, u);
7982 bidiagonal::rmatrixbdunpackpt<Precision>(a, m, n, taup, nrvt, vt);
7983 bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, n, isupper, w, e);
7985 blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7986 result = bdsvd::rmatrixbdsvd<Precision>(w, e, minmn, isupper, false, a, 0, u, nru, vt, ncvt);
7987 blas::inplacetranspose<Precision>(u, 0, nru-1, 0, ncu-1, work);
7994 bidiagonal::rmatrixbd<Precision>(a, m, n, tauq, taup);
7995 bidiagonal::rmatrixbdunpackq<Precision>(a, m, n, tauq, ncu, u);
7996 bidiagonal::rmatrixbdunpackpt<Precision>(a, m, n, taup, nrvt, vt);
7997 bidiagonal::rmatrixbdunpackdiagonals<Precision>(a, m, n, isupper, w, e);
7998 if( additionalmemory<2 || uneeded==0 )
8004 result = bdsvd::rmatrixbdsvd<Precision>(w, e, minmn, isupper, false, u, nru, a, 0, vt, ncvt);
8013 blas::copyandtranspose<Precision>(u, 0, m-1, 0, minmn-1, t2, 0, minmn-1, 0, m-1);
8014 result = bdsvd::rmatrixbdsvd<Precision>(w, e, minmn, isupper, false, u, 0, t2, m, vt, ncvt);
8015 blas::copyandtranspose<Precision>(t2, 0, minmn-1, 0, m-1, u, 0, m-1, 0, minmn-1);
8025 template<unsigned int Precision>
8031 int additionalmemory,
8068 w.setbounds(1, minmn);
8075 u.setbounds(1, nru, 1, ncu);
8081 u.setbounds(1, nru, 1, ncu);
8089 vt.setbounds(1, nrvt, 1, ncvt);
8095 vt.setbounds(1, nrvt, 1, ncvt);
8110 qr::qrdecomposition<Precision>(a, m, n, tau);
8113 for(j=1; j<=i-1; j++)
8118 bidiagonal::tobidiagonal<Precision>(a, n, n, tauq, taup);
8119 bidiagonal::unpackptfrombidiagonal<Precision>(a, n, n, taup, nrvt, vt);
8120 bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, n, n, isupper, w, e);
8121 result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, n, isupper, false, u, 0, a, 0, vt, ncvt);
8130 qr::qrdecomposition<Precision>(a, m, n, tau);
8131 qr::unpackqfromqr<Precision>(a, m, n, tau, ncu, u);
8134 for(j=1; j<=i-1; j++)
8139 bidiagonal::tobidiagonal<Precision>(a, n, n, tauq, taup);
8140 bidiagonal::unpackptfrombidiagonal<Precision>(a, n, n, taup, nrvt, vt);
8141 bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, n, n, isupper, w, e);
8142 if( additionalmemory<1 )
8148 bidiagonal::multiplybyqfrombidiagonal<Precision>(a, n, n, tauq, u, m, n, true, false);
8149 result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, n, isupper, false, u, m, a, 0, vt, ncvt);
8158 bidiagonal::unpackqfrombidiagonal<Precision>(a, n, n, tauq, n, t2);
8159 blas::copymatrix<Precision>(u, 1, m, 1, n, a, 1, m, 1, n);
8160 blas::inplacetranspose<Precision>(t2, 1, n, 1, n, work);
8161 result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, n, isupper, false, u, 0, t2, n, vt, ncvt);
8162 blas::matrixmatrixmultiply<Precision>(a, 1, m, 1, n, false, t2, 1, n, 1, n, true, amp::ampf<Precision>("1.0"), u, 1, m, 1, n, amp::ampf<Precision>("0.0"), work);
8180 lq::lqdecomposition<Precision>(a, m, n, tau);
8181 for(i=1; i<=m-1; i++)
8183 for(j=i+1; j<=m; j++)
8188 bidiagonal::tobidiagonal<Precision>(a, m, m, tauq, taup);
8189 bidiagonal::unpackqfrombidiagonal<Precision>(a, m, m, tauq, ncu, u);
8190 bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, m, isupper, w, e);
8192 blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8193 result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, 0);
8194 blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8203 lq::lqdecomposition<Precision>(a, m, n, tau);
8204 lq::unpackqfromlq<Precision>(a, m, n, tau, nrvt, vt);
8205 for(i=1; i<=m-1; i++)
8207 for(j=i+1; j<=m; j++)
8212 bidiagonal::tobidiagonal<Precision>(a, m, m, tauq, taup);
8213 bidiagonal::unpackqfrombidiagonal<Precision>(a, m, m, tauq, ncu, u);
8214 bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, m, isupper, w, e);
8216 blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8217 if( additionalmemory<1 )
8223 bidiagonal::multiplybypfrombidiagonal<Precision>(a, m, m, taup, vt, m, n, false, true);
8224 result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, m, isupper, false, a, 0, u, nru, vt, n);
8232 bidiagonal::unpackptfrombidiagonal<Precision>(a, m, m, taup, m, t2);
8233 result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, m, isupper, false, a, 0, u, nru, t2, m);
8234 blas::copymatrix<Precision>(vt, 1, m, 1, n, a, 1, m, 1, n);
8235 blas::matrixmatrixmultiply<Precision>(t2, 1, m, 1, m, false, a, 1, m, 1, n, false, amp::ampf<Precision>("1.0"), vt, 1, m, 1, n, amp::ampf<Precision>("0.0"), work);
8237 blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8248 bidiagonal::tobidiagonal<Precision>(a, m, n, tauq, taup);
8249 bidiagonal::unpackqfrombidiagonal<Precision>(a, m, n, tauq, ncu, u);
8250 bidiagonal::unpackptfrombidiagonal<Precision>(a, m, n, taup, nrvt, vt);
8251 bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, n, isupper, w, e);
8253 blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8254 result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, minmn, isupper, false, a, 0, u, nru, vt, ncvt);
8255 blas::inplacetranspose<Precision>(u, 1, nru, 1, ncu, work);
8262 bidiagonal::tobidiagonal<Precision>(a, m, n, tauq, taup);
8263 bidiagonal::unpackqfrombidiagonal<Precision>(a, m, n, tauq, ncu, u);
8264 bidiagonal::unpackptfrombidiagonal<Precision>(a, m, n, taup, nrvt, vt);
8265 bidiagonal::unpackdiagonalsfrombidiagonal<Precision>(a, m, n, isupper, w, e);
8266 if( additionalmemory<2 || uneeded==0 )
8272 result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, minmn, isupper, false, u, nru, a, 0, vt, ncvt);
8281 blas::copyandtranspose<Precision>(u, 1, m, 1, minmn, t2, 1, minmn, 1, m);
8282 result = bdsvd::bidiagonalsvddecomposition<Precision>(w, e, minmn, isupper, false, u, 0, t2, m, vt, ncvt);
8283 blas::copyandtranspose<Precision>(t2, 1, minmn, 1, m, u, 1, m, 1, minmn);
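// Illustrative sketch (assumed): both drivers above follow the same pipeline -- reduce
// A to bidiagonal form (optionally after a QR or LQ factorization when the matrix is
// tall or wide), unpack Q and P^T, extract the diagonals into w and e, and let the
// bidiagonal SVD finish the job -- so on success A ~= U*diag(w)*VT. This check rebuilds
// A from the returned factors and reports the largest entrywise error; the row-major
// layout and the function name are assumptions of the sketch, not library interfaces:
#include <algorithm>
#include <cmath>
#include <vector>
double svd_reconstruction_error_sketch(const std::vector<std::vector<double>>& a,
                                       const std::vector<std::vector<double>>& u,
                                       const std::vector<double>& w,
                                       const std::vector<std::vector<double>>& vt)
{
    std::size_t m = a.size();
    std::size_t n = m ? a[0].size() : 0;
    std::size_t k = w.size();                          // number of singular values
    double maxerr = 0.0;
    for(std::size_t i = 0; i < m; i++)
        for(std::size_t j = 0; j < n; j++)
        {
            double s = 0.0;
            for(std::size_t l = 0; l < k; l++)
                s += u[i][l]*w[l]*vt[l][j];            // (U*diag(w)*VT)(i,j)
            maxerr = std::max(maxerr, std::fabs(a[i][j] - s));
        }
    return maxerr;
}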