%%% -*-BibTeX-*- %%% ==================================================================== %%% BibTeX-file{ %%% author = "Nelson H. F. Beebe", %%% version = "2.23", %%% date = "30 October 2017", %%% time = "06:25:46 MDT", %%% filename = "issac.bib", %%% address = "University of Utah %%% Department of Mathematics, 110 LCB %%% 155 S 1400 E RM 233 %%% Salt Lake City, UT 84112-0090 %%% USA", %%% telephone = "+1 801 581 5254", %%% FAX = "+1 801 581 4148", %%% URL = "http://www.math.utah.edu/~beebe", %%% checksum = "09179 38216 189780 1948736", %%% email = "beebe at math.utah.edu, beebe at acm.org, %%% beebe at computer.org (Internet)", %%% codetable = "ISO/ASCII", %%% keywords = "bibliography, ISSAC, International %%% Symposium on Symbolic and Algebraic %%% Computation", %%% license = "public domain", %%% supported = "yes", %%% docstring = "This is a bibliography of papers presented %%% at the annual ISSAC (International Symposia %%% on Symbolic and Algebraic Computation) %%% conferences. These conferences have been %%% held most years since 1966, with the 23rd on %%% August 13--15, 1998 at the University of %%% Rostock, Germany. %%% %%% It also includes papers from the PASCO %%% (Parallel Symbolic Computation) %%% conferences, the SYMSAC (Symbolic and %%% Algebraic Computation) conferences, and a %%% few papers on symbolic algebra from other %%% conferences not specifically devoted to %%% that subject. %%% %%% Companion bibliographies sigsam.bib and %%% jsymcomp.bib cover papers in the area of %%% symbolic and algebraic computation %%% published in SIGSAM Bulletin and the %%% Journal of Symbolic Computation. 
%%% %%% At version 2.23, the year coverage looked %%% like this: %%% %%% 1976 ( 1) 1989 ( 106) 2002 ( 36) %%% 1977 ( 0) 1990 ( 64) 2003 ( 40) %%% 1978 ( 0) 1991 ( 86) 2004 ( 47) %%% 1979 ( 1) 1992 ( 50) 2005 ( 52) %%% 1980 ( 0) 1993 ( 58) 2006 ( 55) %%% 1981 ( 2) 1994 ( 103) 2007 ( 54) %%% 1982 ( 1) 1995 ( 52) 2008 ( 47) %%% 1983 ( 0) 1996 ( 50) 2009 ( 54) %%% 1984 ( 0) 1997 ( 88) 2010 ( 52) %%% 1985 ( 0) 1998 ( 49) 2011 ( 50) %%% 1986 ( 50) 1999 ( 41) 2012 ( 53) %%% 1987 ( 0) 2000 ( 44) 2013 ( 55) %%% 1988 ( 0) 2001 ( 48) %%% %%% Article: 3 %%% Book: 1 %%% InProceedings: 1441 %%% Proceedings: 44 %%% %%% Total entries: 1489 %%% %%% Regrettably, bibliographic data for most of %%% these conferences prior to 1989 are %%% inaccessible electronically. With an %%% estimated 60 papers at each conference, a %%% complete bibliography would have about 1800 %%% entries, so the coverage is only about 25%. %%% %%% This bibliography has been collected from %%% bibliographies in the author's personal %%% files, from the OCLC and IEEE INSPEC %%% (1989--1995) databases, and from the %%% computer science bibliography collection on %%% ftp.ira.uka.de in /pub/bibliography to %%% which many people have contributed. The %%% snapshot of this collection was taken on %%% 5-May-1994, and it consists of 441 BibTeX %%% files, 2,672,675 lines, 205,289 entries, %%% and 6,375 String{} abbreviations, %%% occupying 94.8MB of disk space. %%% %%% Numerous errors have been corrected, and TeX %%% mathematics mode markup has been added %%% manually to more than 1000 text fragments in %%% the key values. %%% %%% BibTeX citation tags are uniformly chosen %%% as name:year:abbrev, where name is the %%% family name of the first author or editor, %%% year is a 4-digit number, and abbrev is a %%% 3-letter condensation of important title %%% words. Citation tags were automatically %%% generated by software developed for the %%% BibNet Project. 
%%% %%% In this bibliography, entries are sorted %%% first by ascending year, and within each %%% year, alphabetically by author or editor, %%% and then, if necessary, by the 3-letter %%% abbreviation at the end of the BibTeX %%% citation tag, using the bibsort -byyear %%% utility. Year order has been chosen to %%% make it easier to identify the most recent %%% work. %%% %%% The checksum field above contains a CRC-16 %%% checksum as the first value, followed by the %%% equivalent of the standard UNIX wc (word %%% count) utility output of lines, words, and %%% characters. This is produced by Robert %%% Solovay's checksum utility.", %%% } %%% ==================================================================== @Preamble{ "\ifx \undefined \mathbb \def \mathbb #1{{\bf #1}}\fi" # "\ifx \undefined \mathcal \def \mathcal #1{{\cal #1}}\fi" } %%% ==================================================================== %%% Acknowledgement abbreviations: @String{ack-nhfb = "Nelson H. F. Beebe, University of Utah, Department of Mathematics, 110 LCB, 155 S 1400 E RM 233, Salt Lake City, UT 84112-0090, USA, Tel: +1 801 581 5254, FAX: +1 801 581 4148, e-mail: \path|beebe@math.utah.edu|, \path|beebe@acm.org|, \path|beebe@computer.org| (Internet), URL: \path|http://www.math.utah.edu/~beebe/|"} %%% ==================================================================== %%% Journal abbreviations: @String{j-SIGNUM = "ACM SIGNUM Newsletter"} @String{j-SIGSAM = "SIGSAM Bulletin (ACM Special Interest Group on Symbolic and Algebraic Manipulation)"} %%% ==================================================================== %%% Publisher abbreviations: @String{pub-ACM = "ACM Press"} @String{pub-ACM:adr = "New York, NY 10036, USA"} @String{pub-AW = "Ad{\-d}i{\-s}on-Wes{\-l}ey"} @String{pub-AW:adr = "Reading, MA, USA"} @String{pub-CAMBRIDGE = "Cambridge University Press"} @String{pub-CAMBRIDGE:adr = "Cambridge, UK"} @String{pub-IEEE = "IEEE Computer Society Press"} @String{pub-IEEE:adr = "1109 Spring 
Street, Suite 300, Silver Spring, MD 20910, USA"} @String{pub-SIAM = "SIAM Press"} @String{pub-SIAM:adr = "Philadelphia, PA, USA"} @String{pub-SV = "Springer-Verlag"} @String{pub-SV:adr = "Berlin, Germany~/ Heidelberg, Germany~/ London, UK~/ etc."} @String{pub-WORLD-SCI = "World Scientific Publishing Co."} @String{pub-WORLD-SCI:adr = "Singapore; Philadelphia, PA, USA; River Edge, NJ, USA"} %%% ==================================================================== %%% Series abbreviations: @String{ser-LNCS = "Lecture Notes in Computer Science"} %%% ==================================================================== %%% Bibliography entries: @InProceedings{Fateman:1981:CAN, author = "Richard J. Fateman", title = "Computer Algebra and Numerical Integration", crossref = "Wang:1981:SPA", pages = "228--232", year = "1981", bibdate = "Mon Apr 25 07:01:52 2005", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Algebraic manipulation systems such as MACSYMA include algorithms and heuristic procedures for indefinite and definite integration, yet these system facilities are not as generally useful as might be thought. Most isolated definite integration problems are more efficiently tackled with numerical programs. Unfortunately, the answers obtained are sometimes incorrect, in spite of assurances of accuracy; furthermore, large classes of problems can sometimes be solved more rapidly by preliminary algebraic transformations. In this paper we indicate various directions for improving the usefulness of integration programs given closed form integrands, via algebraic manipulation techniques. These include expansions in partial fractions or Taylor series, detection and removal of singularities and symmetries, and various approximation techniques for troublesome problems.", acknowledgement = ack-nhfb, } @Book{Buchberger:1982:CAS, author = "Bruno Buchberger and George Edward Collins and R{\"u}diger Loos and R. 
Albrecht", title = "Computer algebra: symbolic and algebraic computation", volume = "4", publisher = pub-SV, address = pub-SV:adr, pages = "vi + 283", year = "1982", ISBN = "0-387-81684-4", ISBN-13 = "978-0-387-81684-5", LCCN = "QA155.7.E4 C65 1982", bibdate = "Thu Dec 28 13:48:31 1995", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", series = "Computing. Supplementum", acknowledgement = ack-nhfb, keywords = "algorithms; measurement; theory", subject = "S1 Algebra --- Data processing; S2 Machine theory", } @InProceedings{Abbott:1986:BAN, author = "J. A. Abbott and R. J. Bradford and J. H. Davenport", title = "The {Bath} algebraic number package", crossref = "Char:1986:PSS", pages = "250--253", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p250-abbott/", acknowledgement = ack-nhfb, keywords = "design; measurement; performance", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Algebraic algorithms. {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Simplification of expressions. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials. {\bf G.4} Mathematics of Computing, MATHEMATICAL SOFTWARE. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, REDUCE.", } @InProceedings{Abdali:1986:OOA, author = "S. K. Abdali and Guy W. 
Cherry and Neil Soiffer", title = "An object-oriented approach to algebra system design", crossref = "Char:1986:PSS", pages = "24--30", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p24-abdali/", acknowledgement = ack-nhfb, keywords = "algorithms; design; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf D.3.3} Software, PROGRAMMING LANGUAGES, Language Constructs and Features, Abstract data types. {\bf D.3.4} Software, PROGRAMMING LANGUAGES, Processors, Run-time environments. {\bf D.3.2} Software, PROGRAMMING LANGUAGES, Language Classifications, Specialized application languages. {\bf D.3.2} Software, PROGRAMMING LANGUAGES, Language Classifications, Very high-level languages.", } @InProceedings{Akritis:1986:TNU, author = "Alkiviadis G. Akritis", title = "There is no ``{Uspensky}'s method''", crossref = "Char:1986:PSS", pages = "88--90", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p88-akritis/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Analysis of algorithms. {\bf G.1.5} Mathematics of Computing, NUMERICAL ANALYSIS, Roots of Nonlinear Equations, Polynomials, methods for. {\bf K.2} Computing Milieux, HISTORY OF COMPUTING, Systems. {\bf G.1.5} Mathematics of Computing, NUMERICAL ANALYSIS, Roots of Nonlinear Equations, Iterative methods. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials.", } @InProceedings{Arnborg:1986:ADR, author = "S. Arnborg and H. 
Feng", title = "Algebraic decomposition of regular curves", crossref = "Char:1986:PSS", pages = "53--55", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p53-arnborg/", acknowledgement = ack-nhfb, keywords = "theory", subject = "{\bf I.1.m} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Miscellaneous.", } @InProceedings{Bachmair:1986:CPC, author = "Leo Bachmair and Nachum Dershowitz", title = "Critical-pair criteria for the {Knuth--Bendix} completion procedure", crossref = "Char:1986:PSS", pages = "215--217", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p215-bachmair/", acknowledgement = ack-nhfb, keywords = "languages; theory; verification", subject = "{\bf F.4.2} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Grammars and Other Rewriting Systems, Parallel rewriting systems. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Simplification of expressions. {\bf F.2.3} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Tradeoffs between Complexity Measures. 
{\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Complexity of proof procedures.", } @InProceedings{Bajaj:1986:LAS, author = "Chanderjit Bajaj", title = "Limitations to algorithm solvability: {Galois} methods and models of computation", crossref = "Char:1986:PSS", pages = "71--76", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p71-bajaj/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Analysis of algorithms. {\bf G.2.m} Mathematics of Computing, DISCRETE MATHEMATICS, Miscellaneous. {\bf G.4} Mathematics of Computing, MATHEMATICAL SOFTWARE, Algorithm design and analysis.", } @InProceedings{Bayer:1986:DMS, author = "D. Bayer and M. Stillman", title = "The design of {Macaulay}: a system for computing in algebraic geometry and commutative algebra", crossref = "Char:1986:PSS", pages = "157--162", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p157-bayer/", acknowledgement = ack-nhfb, keywords = "design; performance; theory", subject = "{\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Geometrical problems and computations. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems.", } @InProceedings{Beck:1986:SAL, author = "Robert E. 
Beck and Bernard Kolman", title = "Symbolic algorithms for {Lie} algebra computation", crossref = "Char:1986:PSS", pages = "85--87", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p85-beck/", acknowledgement = ack-nhfb, keywords = "algorithms; performance; theory", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Algebraic algorithms. {\bf I.2.2} Computing Methodologies, ARTIFICIAL INTELLIGENCE, Automatic Programming. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on matrices. {\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Analysis of algorithms. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, MACSYMA. {\bf K.2} Computing Milieux, HISTORY OF COMPUTING, Systems.", } @InProceedings{Bradford:1986:ERD, author = "R. J. Bradford and A. C. Hearn and J. A. Padget and E. Schr{\"u}fer", title = "Enlarging the {REDUCE} domain of computation", crossref = "Char:1986:PSS", pages = "100--106", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p100-bradford/", acknowledgement = ack-nhfb, keywords = "algorithms; languages; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, REDUCE. {\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Computations on discrete structures. 
{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Algebraic algorithms.", } @InProceedings{Bronstein:1986:GFA, author = "Manuel Bronstein", title = "Gsolve: a faster algorithm for solving systems of algebraic equations", crossref = "Char:1986:PSS", pages = "247--249", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p247-bronstein/", acknowledgement = ack-nhfb, keywords = "algorithms; design; performance; theory", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Algebraic algorithms. {\bf G.4} Mathematics of Computing, MATHEMATICAL SOFTWARE, Efficiency. {\bf G.1.5} Mathematics of Computing, NUMERICAL ANALYSIS, Roots of Nonlinear Equations, Systems of equations. {\bf G.4} Mathematics of Computing, MATHEMATICAL SOFTWARE, Reliability and robustness.", } @InProceedings{Butler:1986:DCC, author = "Greg Butler", title = "Divide-and-conquer in computational group theory", crossref = "Char:1986:PSS", pages = "59--64", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p59-butler/", acknowledgement = ack-nhfb, keywords = "algorithms", subject = "{\bf G.2.0} Mathematics of Computing, DISCRETE MATHEMATICS, General. {\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Computations on discrete structures. {\bf I.1.0} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, General.", } @InProceedings{Chaffy:1986:HCM, author = "C. 
Chaffy", title = "How to compute multivariate {Pad{\'e}} approximants", crossref = "Char:1986:PSS", pages = "56--58", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p56-chaffy/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf G.1.2} Mathematics of Computing, NUMERICAL ANALYSIS, Approximation.", } @InProceedings{Char:1986:CAU, author = "B. W. Char and K. O. Geddes and G. H. Gonnet and B. J. Marshman and P. J. Ponzo", title = "Computer algebra in the undergraduate mathematics classroom", crossref = "Char:1986:PSS", pages = "135--140", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p135-char/", acknowledgement = ack-nhfb, keywords = "algorithms; design; documentation; experimentation; human factors; performance", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, Maple. {\bf K.3.1} Computing Milieux, COMPUTERS AND EDUCATION, Computer Uses in Education, Computer-assisted instruction (CAI).", } @InProceedings{Cooperman:1986:SMC, author = "Gene Cooperman", title = "A semantic matcher for computer algebra", crossref = "Char:1986:PSS", pages = "132--134", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p132-cooperman/", acknowledgement = ack-nhfb, keywords = "experimentation; human factors; languages", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, Special-purpose algebraic systems. 
{\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, Evaluation strategies. {\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Pattern matching. {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Representations (general and polynomial). {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, MACSYMA.", } @InProceedings{Czapor:1986:IBA, author = "S. R. Czapor and K. O. Geddes", title = "On implementing {Buchberger}'s algorithm for {Gr{\"o}bner} bases", crossref = "Char:1986:PSS", pages = "233--238", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p233-czapor/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, Maple. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials.", } @InProceedings{Davenport:1986:PSM, author = "J. H. Davenport and C. E. Roth", title = "{PowerMath}: a system for the {Macintosh}", crossref = "Char:1986:PSS", pages = "13--15", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p13-davenport/", acknowledgement = ack-nhfb, keywords = "design; theory", subject = "{\bf K.8} Computing Milieux, PERSONAL COMPUTING, Apple. 
{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, Special-purpose algebraic systems.", } @InProceedings{Dora:1986:FSL, author = "J. Della Dora and E. Tournier", title = "Formal solutions of linear difference equations: method of {Pincherle--Ramis}", crossref = "Char:1986:PSS", pages = "192--196", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p192-della_dora/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf G.1.m} Mathematics of Computing, NUMERICAL ANALYSIS, Miscellaneous. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computation of transforms.", } @InProceedings{Fitch:1986:AIA, author = "J. Fitch and A. Norman and M. A. Moore", title = "Alkahest {III}: automatic analysis of periodic weakly nonlinear {ODEs}", crossref = "Char:1986:PSS", pages = "34--38", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p34-fitch/", acknowledgement = ack-nhfb, keywords = "algorithms; design; human factors; theory", subject = "{\bf G.1.7} Mathematics of Computing, NUMERICAL ANALYSIS, Ordinary Differential Equations. {\bf D.2.2} Software, SOFTWARE ENGINEERING, Design Tools and Techniques, User interfaces.", } @InProceedings{Freeman:1986:SMP, author = "T. Freeman and G. Imirzian and E. 
Kaltofen", title = "A system for manipulating polynomials given by straight-line programs", crossref = "Char:1986:PSS", pages = "169--175", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p169-freeman/", acknowledgement = ack-nhfb, keywords = "algorithms; design; performance; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf G.1.5} Mathematics of Computing, NUMERICAL ANALYSIS, Roots of Nonlinear Equations, Polynomials, methods for.", } @InProceedings{Furukawa:1986:GBM, author = "A. Furukawa and T. Sasaki and H. Kobayashi", title = "The {Gr{\"o}bner} basis of a module over {$K[X_1,\ldots{},X_n]$} and polynomial solutions of a system of linear equations", crossref = "Char:1986:PSS", pages = "222--224", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p222-furukawa/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials. {\bf G.1.3} Mathematics of Computing, NUMERICAL ANALYSIS, Numerical Linear Algebra, Linear systems (direct and iterative methods).", } @InProceedings{Gates:1986:NCG, author = "Barbara L. 
Gates", title = "A numerical code generation facility for {REDUCE}", crossref = "Char:1986:PSS", pages = "94--99", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p94-gates/", acknowledgement = ack-nhfb, keywords = "design; languages; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, REDUCE. {\bf D.3.4} Software, PROGRAMMING LANGUAGES, Processors, Code generation.", } @InProceedings{Gebauer:1986:BAS, author = "R{\"u}diger Gebauer and H. Michael M{\"o}ller", title = "{Buchberger}'s algorithm and staggered linear bases", crossref = "Char:1986:PSS", pages = "218--221", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p218-gebauer/", acknowledgement = ack-nhfb, keywords = "algorithms; measurement; performance; theory", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Algebraic algorithms. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials. {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Simplification of expressions.", } @InProceedings{Geddes:1986:NIS, author = "K. O. 
Geddes", title = "Numerical integration in a symbolic context", crossref = "Char:1986:PSS", pages = "185--191", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p185-geddes/", acknowledgement = ack-nhfb, keywords = "algorithms; design", subject = "{\bf G.1.4} Mathematics of Computing, NUMERICAL ANALYSIS, Quadrature and Numerical Differentiation. {\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms.", } @InProceedings{Golden:1986:OAM, author = "J. P. Golden", title = "An operator algebra for {Macsyma}", crossref = "Char:1986:PSS", pages = "244--246", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p244-golden/", acknowledgement = ack-nhfb, keywords = "design; theory; verification", subject = "{\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, MACSYMA. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, MACSYMA.", } @InProceedings{Gonnet:1986:IOS, author = "G. H. Gonnet", title = "An implementation of operators for symbolic algebra systems", crossref = "Char:1986:PSS", pages = "239--243", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p239-gonnet/", acknowledgement = ack-nhfb, keywords = "design; languages; theory", subject = "{\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Representations (general and polynomial). 
{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems.", } @InProceedings{Gonnet:1986:NRR, author = "Gaston H. Gonnet", title = "New results for random determination of equivalence of expressions", crossref = "Char:1986:PSS", pages = "127--131", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p127-gonnet/", acknowledgement = ack-nhfb, keywords = "theory", subject = "{\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials. {\bf G.2.m} Mathematics of Computing, DISCRETE MATHEMATICS, Miscellaneous.", } @InProceedings{Hadzikadic:1986:AKB, author = "M. Hadzikadic and F. Lichtenberger and D. Y. Y. Yun", title = "An application of knowledge-base technology in education: a geometry theorem prover", crossref = "Char:1986:PSS", pages = "141--147", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p141-hadzikadic/", acknowledgement = ack-nhfb, keywords = "algorithms; experimentation; human factors; languages; performance; verification", subject = "{\bf K.3.1} Computing Milieux, COMPUTERS AND EDUCATION, Computer Uses in Education, Computer-assisted instruction (CAI). {\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Geometrical problems and computations. {\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic, Mechanical theorem proving. 
{\bf I.2.3} Computing Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and Theorem Proving.", } @InProceedings{Hayden:1986:SBC, author = "Michael B. Hayden and Edmund A. Lamagna", title = "Summation of binomial coefficients using hypergeometric functions", crossref = "Char:1986:PSS", pages = "77--81", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p77-hayden/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, REDUCE. {\bf F.1.2} Theory of Computation, COMPUTATION BY ABSTRACT DEVICES, Modes of Computation, Parallelism and concurrency. {\bf I.2.2} Computing Methodologies, ARTIFICIAL INTELLIGENCE, Automatic Programming, Automatic analysis of algorithms. {\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Geometrical problems and computations. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials. {\bf G.1.4} Mathematics of Computing, NUMERICAL ANALYSIS, Quadrature and Numerical Differentiation, Iterative methods.", } @InProceedings{Hilali:1986:ACF, author = "A. Hilali and A. 
Wazner", title = "Algorithm for computing formal invariants of linear differential systems", crossref = "Char:1986:PSS", pages = "197--201", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p197-hilali/", acknowledgement = ack-nhfb, keywords = "algorithms; theory; verification", subject = "{\bf G.1.3} Mathematics of Computing, NUMERICAL ANALYSIS, Numerical Linear Algebra, Eigenvalues and eigenvectors (direct and iterative methods). {\bf G.1.7} Mathematics of Computing, NUMERICAL ANALYSIS, Ordinary Differential Equations. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on matrices. {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Simplification of expressions.", } @InProceedings{Jurkovic:1986:EES, author = "N. Jurkovic", title = "Edusym --- educational symbolic manipulator on a microcomputer", crossref = "Char:1986:PSS", pages = "154--156", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p154-jurkovic/", acknowledgement = ack-nhfb, keywords = "human factors; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, MuMATH. {\bf K.3.1} Computing Milieux, COMPUTERS AND EDUCATION, Computer Uses in Education, Computer-assisted instruction (CAI).", } @InProceedings{Kaltofen:1986:FPA, author = "E. Kaltofen and M. Krishnamoorthy and B. D. 
Saunders", title = "Fast parallel algorithms for similarity of matrices", crossref = "Char:1986:PSS", pages = "65--70", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p65-kaltofen/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf G.1.0} Mathematics of Computing, NUMERICAL ANALYSIS, General, Parallel algorithms. {\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Analysis of algorithms. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on matrices.", } @InProceedings{Kapur:1986:GTP, author = "Deepak Kapur", title = "Geometry theorem proving using {Hilbert}'s {Nullstellensatz}", crossref = "Char:1986:PSS", pages = "202--208", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p202-kapur/", acknowledgement = ack-nhfb, keywords = "algorithms; theory; verification", subject = "{\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic, Logic and constraint programming. {\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Geometrical problems and computations. {\bf I.2.3} Computing Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and Theorem Proving. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials. {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Simplification of expressions.", } @InProceedings{Knowles:1986:ILF, author = "P. H. 
Knowles", title = "Integration of {Liouvillian} functions with special functions", crossref = "Char:1986:PSS", pages = "179--184", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p179-knowles/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf G.1.m} Mathematics of Computing, NUMERICAL ANALYSIS, Miscellaneous.", } @InProceedings{Kobayashi:1986:GBI, author = "H. Kobayashi and A. Furukawa and T. Sasaki", title = "Gr{\"o}bner bases of ideals of convergent power series", crossref = "Char:1986:PSS", pages = "225--227", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p225-kobayashi/", acknowledgement = ack-nhfb, keywords = "theory", subject = "{\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf G.m} Mathematics of Computing, MISCELLANEOUS.", } @InProceedings{Kryukov:1986:CRA, author = "A. P. Kryukov and Y. Rodionov and G. L. Litvinov", title = "Construction of rational approximations by means of {REDUCE}", crossref = "Char:1986:PSS", pages = "31--33", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p31-kryukov/", acknowledgement = ack-nhfb, keywords = "algorithms; design; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, REDUCE. 
{\bf G.1.2} Mathematics of Computing, NUMERICAL ANALYSIS, Approximation, Rational approximation. {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Simplification of expressions.", } @InProceedings{Kryukov:1986:DRE, author = "A. P. Kryukov", title = "Dialogue in {REDUCE}: experience and development", crossref = "Char:1986:PSS", pages = "107--109", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p107-kryukov/", acknowledgement = ack-nhfb, keywords = "design; human factors; performance; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, REDUCE. {\bf D.2.2} Software, SOFTWARE ENGINEERING, Design Tools and Techniques, User interfaces.", } @InProceedings{Kryukov:1986:URC, author = "A. P. Kryukov and A. Y. Rodionov", title = "Usage of {REDUCE} for computations of group-theoretical weight of {Feynman} diagrams in {non-Abelian} gauge theories", crossref = "Char:1986:PSS", pages = "91--93", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p91-kryukov/", acknowledgement = ack-nhfb, keywords = "algorithms; design; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, REDUCE. {\bf G.2.m} Mathematics of Computing, DISCRETE MATHEMATICS, Miscellaneous.", } @InProceedings{Kutzler:1986:AGT, author = "B. Kutzler and S. 
Stifter", title = "Automated geometry theorem proving using {Buchberger}'s algorithm", crossref = "Char:1986:PSS", pages = "209--214", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p209-kutzler/", acknowledgement = ack-nhfb, keywords = "algorithms; theory; verification", subject = "{\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic, Logic and constraint programming. {\bf I.2.3} Computing Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and Theorem Proving. {\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Geometrical problems and computations. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials. {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Simplification of expressions.", } @InProceedings{Leff:1986:CSG, author = "L. Leff and D. Y. Y. Yun", title = "Constructive solid geometry: a symbolic computation approach", crossref = "Char:1986:PSS", pages = "121--126", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p121-leff/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf J.6} Computer Applications, COMPUTER-AIDED ENGINEERING. {\bf F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and Problems, Geometrical problems and computations. {\bf I.1.m} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Miscellaneous.", } @InProceedings{Leong:1986:IDU, author = "B. L. 
Leong", title = "{Iris}: design of an user interface program for symbolic algebra", crossref = "Char:1986:PSS", pages = "1--6", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p1-leong/", acknowledgement = ack-nhfb, keywords = "design; human factors; theory", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Algebraic algorithms. {\bf D.2.2} Software, SOFTWARE ENGINEERING, Design Tools and Techniques, User interfaces. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, Maple. {\bf H.1.2} Information Systems, MODELS AND PRINCIPLES, User/Machine Systems, Human factors.", } @InProceedings{Lucks:1986:FIP, author = "Michael Lucks", title = "A fast implementation of polynomial factorization", crossref = "Char:1986:PSS", pages = "228--232", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p228-lucks/", acknowledgement = ack-nhfb, keywords = "algorithms; design; experimentation; performance; theory", subject = "{\bf G.1.5} Mathematics of Computing, NUMERICAL ANALYSIS, Roots of Nonlinear Equations, Polynomials, methods for. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Number-theoretic computations.", } @InProceedings{Mawata:1986:SDR, author = "C. P. 
Mawata", title = "A sparse distributed representation using prime numbers", crossref = "Char:1986:PSS", pages = "110--114", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p110-mawata/", acknowledgement = ack-nhfb, keywords = "experimentation; performance; theory", subject = "{\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Number-theoretic computations. {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Representations (general and polynomial). {\bf G.1.0} Mathematics of Computing, NUMERICAL ANALYSIS, General, Parallel algorithms. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on matrices. {\bf G.4} Mathematics of Computing, MATHEMATICAL SOFTWARE, Algorithm design and analysis. {\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Analysis of algorithms.", } @InProceedings{Purtilo:1986:ASI, author = "J. Purtilo", title = "Applications of a software interconnection system in mathematical problem solving environments", crossref = "Char:1986:PSS", pages = "16--23", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p16-purtilo/", acknowledgement = ack-nhfb, keywords = "design; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf G.m} Mathematics of Computing, MISCELLANEOUS. {\bf D.2.m} Software, SOFTWARE ENGINEERING, Miscellaneous.", } @InProceedings{Renbao:1986:CAS, author = "Z. Renbao and X. Ling and R. 
Zhaoyang", title = "The computer algebra system {CAS1} for the {IBM-PC}", crossref = "Char:1986:PSS", pages = "176--178", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p176-renbao/", acknowledgement = ack-nhfb, keywords = "design; theory", subject = "{\bf K.8} Computing Milieux, PERSONAL COMPUTING, IBM PC. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, Special-purpose algebraic systems. {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Simplification of expressions.", } @InProceedings{Sasaki:1986:SAE, author = "Tateaki Sasaki", title = "Simplification of algebraic expression by multiterm rewriting rules", crossref = "Char:1986:PSS", pages = "115--120", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p115-sasaki/", acknowledgement = ack-nhfb, keywords = "algorithms; design; languages", subject = "{\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation, Simplification of expressions. {\bf F.4.2} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Grammars and Other Rewriting Systems, Parallel rewriting systems.", } @InProceedings{Seymour:1986:CCM, author = "Harlan R. 
Seymour", title = "Conform: a conformal mapping system", crossref = "Char:1986:PSS", pages = "163--168", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p163-seymour/", acknowledgement = ack-nhfb, keywords = "design; languages; performance; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf D.3.2} Software, PROGRAMMING LANGUAGES, Language Classifications, LISP. {\bf D.3.3} Software, PROGRAMMING LANGUAGES, Language Constructs and Features.", } @InProceedings{Shavlik:1986:CUG, author = "Jude W. Shavlik and Gerald F. DeJong", title = "Computer understanding and generalization of symbolic mathematical calculations: a case study in physics problem solving", crossref = "Char:1986:PSS", pages = "148--153", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p148-shavlik/", acknowledgement = ack-nhfb, keywords = "design; human factors; languages; performance; theory; verification", subject = "{\bf I.2.6} Computing Methodologies, ARTIFICIAL INTELLIGENCE, Learning. {\bf K.3.1} Computing Milieux, COMPUTERS AND EDUCATION, Computer Uses in Education, Computer-assisted instruction (CAI). {\bf I.1.1} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Expressions and Their Representation. {\bf I.2.1} Computing Methodologies, ARTIFICIAL INTELLIGENCE, Applications and Expert Systems. {\bf J.2} Computer Applications, PHYSICAL SCIENCES AND ENGINEERING, Physics. {\bf G.4} Mathematics of Computing, MATHEMATICAL SOFTWARE. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, Substitution mechanisms**. 
{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, Evaluation strategies. {\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Algebraic algorithms.", } @InProceedings{Smith:1986:MUI, author = "C. J. Smith and N. Soiffer", title = "{MathScribe}: a user interface for computer algebra systems", crossref = "Char:1986:PSS", pages = "7--12", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p7-smith/", acknowledgement = ack-nhfb, keywords = "design; human factors; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf D.2.2} Software, SOFTWARE ENGINEERING, Design Tools and Techniques, User interfaces.", } @InProceedings{Yun:1986:FCF, author = "D. Y. Y. Yun and C. N. Zhang", title = "A fast carry-free algorithm and hardware design for extended integer {GCD} computation", crossref = "Char:1986:PSS", pages = "82--84", year = "1986", bibdate = "Thu Mar 12 07:38:29 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/32439/p82-yun/", acknowledgement = ack-nhfb, keywords = "algorithms; design; theory", subject = "{\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Number-theoretic computations. {\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Analysis of algorithms. {\bf G.4} Mathematics of Computing, MATHEMATICAL SOFTWARE, Algorithm design and analysis. {\bf B.7.1} Hardware, INTEGRATED CIRCUITS, Types and Design Styles, Algorithms implemented in hardware.", } @InProceedings{A:1989:SSG, author = "R. A. {Ravenscroft, Jr.} and E. A.
Lamagna", title = "Symbolic summation with generating functions", crossref = "Gonnet:1989:PAI", pages = "228--233", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p228-ravenscroft/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf G.2.1} Mathematics of Computing, DISCRETE MATHEMATICS, Combinatorics, Generating functions. {\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Analysis of algorithms. {\bf G.1.3} Mathematics of Computing, NUMERICAL ANALYSIS, Numerical Linear Algebra, Linear systems (direct and iterative methods).", } @InProceedings{Abbot:1989:RAN, author = "J. Abbot", title = "Recovery of algebraic numbers from their $p$-adic approximations", crossref = "Gonnet:1989:PAI", pages = "112--120", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The author describes three ways to generalize Lenstra's algebraic integer recovery method. One direction adapts the algorithm so that rational numbers are automatically produced given only upper bounds on the sizes of the numerators and denominators. Another direction produces a variant which recovers algebraic numbers as elements of multiple generator algebraic number fields. The third direction explains how the method can work if a reducible minimal polynomial had been given for an algebraic generator. Any two or all three of the generalisations may be employed simultaneously.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. Sci., Rensselaer Polytech. 
Inst., Troy, NY, USA", classification = "C1110 (Algebra); C4130 (Interpolation and function approximation); C4240 (Programming and algorithm theory)", keywords = "Algebraic generator; Algebraic integer recovery method; Algebraic numbers; Computer algebra; Denominators; Factorisation; Lenstra; Multiple generator algebraic number fields; Numerators; P-adic approximations; Rational numbers; Reducible minimal polynomial; Upper bounds", thesaurus = "Computation theory; Number theory; Polynomials; Symbol manipulation", } @InProceedings{Abbott:1989:RAN, author = "John Abbott", title = "Recovery of algebraic numbers from their $p$-adic approximations", crossref = "Gonnet:1989:PAI", pages = "112--120", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p112-abbott/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf G.1.2} Mathematics of Computing, NUMERICAL ANALYSIS, Approximation. {\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Algebraic algorithms. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials.", } @InProceedings{Abdali:1989:EQR, author = "S. K. Abdali and D. S. Wise", title = "Experiments with quadtree representation of matrices", crossref = "Gianni:1989:SAC", pages = "96--108", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The quadtrees matrix representation has been recently proposed as an alternative to the conventional linear storage of matrices. If all elements of a matrix are zero, then the matrix is represented by an empty tree; otherwise it is represented by a tree consisting of four subtrees, each representing, recursively, a quadrant of the matrix. 
Using four-way block decomposition, algorithms on quadtrees accelerate on blocks entirely of zeros, and thereby offer improved performance on sparse matrices. The paper reports the results of experiments done with a quadtree matrix package implemented in REDUCE to compare the performance of quadtree representation with REDUCE's built-in sequential representation of matrices. Tests on addition, multiplication, and inversion of dense, triangular, tridiagonal, and diagonal matrices (both symbolic and numeric) of sizes up to 100*100 show that the quadtree algorithms perform well in a broad range of circumstances, sometimes running orders of magnitude faster than their sequential counterparts.", acknowledgement = ack-nhfb, affiliation = "Tektronix Labs., Beaverton, OR, USA", classification = "C1110 (Algebra); C1160 (Combinatorial mathematics); C4140 (Linear algebra); C6120 (File organisation); C7310 (Mathematics)", keywords = "Addition; Dense matrices; Diagonal matrices; Empty tree; Four-way block decomposition; Inversion; Multiplication; Performance comparison; Quadrant; Quadtree algorithms; Quadtree matrix package; Quadtrees matrix representation; REDUCE; Sparse matrices; Subtrees; Triangular matrices; Tridiagonal matrices; Zero elements", thesaurus = "Data structures; Mathematics computing; Matrix algebra; Trees [mathematics]", } @InProceedings{Abdulrab:1989:EW, author = "H. Abdulrab", title = "Equations in words", crossref = "Gianni:1989:SAC", pages = "508--520", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The study of equations in words was introduced by Lentin (1972). There is always a solution for any equation with no constant. Makanin (1977) showed that solving equations with constants is decidable. Pecuchet (1981) unified the two theories of equations with or without constants and gave a new description of Makanin's algorithm. 
This paper describes some new results in the field of solving equations in words.", acknowledgement = ack-nhfb, affiliation = "LITP, Fac. des Sci., Mont Saint Aignan, France", classification = "C4210 (Formal logic)", keywords = "Decidable; Equations in words", thesaurus = "Decidability", } @InProceedings{Abhyankar:1989:CAC, author = "S. S. Abhyankar and C. L. Bajaj", title = "Computations with algebraic curves", crossref = "Gianni:1989:SAC", pages = "274--284", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The authors present a variety of computational techniques dealing with algebraic curves both in the plane and in space. The main results are polynomial time algorithms: (1) to compute the genus of plane algebraic curves; (2) to compute the rational parametric equations for implicitly defined rational plane algebraic curves of arbitrary degree; (3) to compute birational mappings between points on irreducible space curves and points on projected plane curves and thereby to compute the genus and rational parametric equations for implicitly defined rational space curves of arbitrary degree; and (4) to check for the faithfulness (one to one) of parameterizations.", acknowledgement = ack-nhfb, affiliation = "Purdue Univ., West Lafayette, IN, USA", classification = "C4130 (Interpolation and function approximation); C4190 (Other numerical methods)", keywords = "Algebraic curves; Birational mappings; Computational techniques; Irreducible space curves; Polynomial time algorithms; Rational parametric equations", thesaurus = "Computational geometry; Polynomials", } @InProceedings{Alonso:1989:CAS, author = "M. E. Alonso and T. Mora and M. 
Raimondo", title = "Computing with algebraic series", crossref = "Gonnet:1989:PAI", pages = "101--111", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p101-alonso/", abstract = "The authors develop a computational model for algebraic formal power series, based on a symbolic codification of the series by means of the implicit function theorem: i.e. they consider algebraic series as the unique solutions of suitable functional equations. They show that most of the usual local commutative algebra can be effectively performed on algebraic series, since they can reduce to the polynomial case, where the tangent cone algorithm can be used to effectively perform local algebra. The main result to the paper is an effective version of Weierstrass theorems, which allows effective elimination theory for algebraic series and an effective noether normalization lemma.", acknowledgement = ack-nhfb, affiliation = "Univ. Complutense, Madrid, Spain", classification = "C1110 (Algebra); C1120 (Analysis); C4150 (Nonlinear and functional equations); C4240 (Programming and algorithm theory)", keywords = "Algebraic formal power series; Algebraic series; algorithms; Computational model; Elimination theory; Functional equations; Implicit function theorem; Local commutative algebra; Noether normalization lemma; Polynomial; Symbolic codification; Tangent cone algorithm; theory; Weierstrass theorems", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Algebraic algorithms. {\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic, Computational logic.", thesaurus = "Computability; Functional equations; Polynomials; Series [mathematics]; Symbol manipulation", } @InProceedings{Arnborg:1989:EPO, author = "S. 
Arnborg", title = "Experiments with a projection operator for algebraic decomposition", crossref = "Gianni:1989:SAC", pages = "177--182", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Reports an experiment with the projection phase of an algebraic decomposition problem. The decomposition asked for is a collection of rational sample points, at least one in each full-dimensional region of a decomposition, sign-invariant with respect to a set of polynomials and with a cylindrical structure. Such a decomposition is less general than a cylindrical algebraic decomposition, but still useful for purposes such as solving collision and motion planning problems in theoretical robotics. Specifically, there is no information about the structure of less than full-dimensional regions and intersections between projections of regions. This makes quantifier elimination with alternating quantifiers difficult or impossible.", acknowledgement = ack-nhfb, affiliation = "Dept. of Numer. Anal. and Comput. Sci., R. Inst. of Technol., Stockholm, Sweden", classification = "C1110 (Algebra)", keywords = "Algebraic decomposition; Cylindrical structure; Full-dimensional region; Polynomials; Projection operator; Projection phase; Rational sample points; Sign-invariant", thesaurus = "Algebra; Polynomials", } @InProceedings{Ausiello:1989:DMP, author = "G. Ausiello and A. Marchetti Spaccamela and U. Nanni", title = "Dynamic maintenance of paths and path expressions on graphs", crossref = "Gianni:1989:SAC", pages = "1--12", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In several applications it is necessary to deal with data structures that may dynamically change during a sequence of operations. 
In these cases the classical worst case analysis of the cost of a single operation may not adequately describe the behaviour of the structure but it is rather more meaningful to analyze the cost of the whole sequence of operations. The paper first discusses some results on maintaining paths in dynamic graphs. Besides, it considers paths problems on dynamic labeled graphs and shows how to maintain path expressions in the acyclic case when insertions of new arcs are allowed.", acknowledgement = ack-nhfb, affiliation = "Dipartimento di Inf. e Sistemistica, Rome Univ., Italy", classification = "C1160 (Combinatorial mathematics); C4240 (Programming and algorithm theory); C6120 (File organisation)", keywords = "Acyclic case; Data structures; Dynamic graphs; Dynamic labeled graphs; Dynamic maintenance; Insertions; New arcs; Path expressions; Paths problems", thesaurus = "Computational complexity; Data structures; Graph theory", } @InProceedings{Avenhaus:1989:URT, author = "J. Avenhaus and D. Wi{\ss}mann", title = "Using rewriting techniques to solve the generalized word problem in polycyclic groups", crossref = "Gonnet:1989:PAI", pages = "322--337", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p322-avenhaus/", abstract = "The authors apply rewriting techniques to the generalized word problem GWP in polycyclic groups. They assume the group $G$ to be given by a canonical polycyclic string-rewriting system $R$ and consider GWP in $G$ which is defined by $GWP(w,U)$ iff $w$ in $\langle U \rangle$ for $w$ in $G$, finite $U$ contained in $G$, where $\langle U \rangle$ is the subgroup of $G$ generated by $U$. They describe $\langle U \rangle$ also by a rewrite system $S$ and define a rewrite relation $\mbox{implies}_{S,R}$ in such a way that $w$ implied by * $\mbox{implies}_{S,R} \lambda$ iff $w$ in $\langle U \rangle$ ($\lambda$ the empty word). 
For this rewrite relation the authors develop different critical pair criteria for $\mbox{implies}_{S,R}$ to be $\lambda$-confluent, i.e. confluent on the left-congruence class $(\lambda )$ of $\mbox{implies}^{*}_{S,R}$. Using any of these $\lambda$-confluence criteria they construct a completion procedure which stops for every input $S$ and computes a $\lambda$-confluent rewrite system equivalent to $S$. This leads to a decision procedure for GWP in $G$. Thus the authors give an explicit uniform algorithm for deciding GWP in polycyclic groups and a new proof based almost only on rewriting techniques for the decidability of this problem. Further, they define a rewrite relation $\mbox{implies}_{LM,U}$ which is stronger than $\mbox{implies}_{S,R}$. They show that if $G$ is given by a nilpotent string-rewriting system, then by a completion procedure the input $U$ can be transformed into $V$ such that $\mbox{implies}_{LM,V}$ is even confluent, not just $\lambda$-confluent.", acknowledgement = ack-nhfb, affiliation = "Fachbereich Inf., Kaiserslautern Univ., West Germany", classification = "C1110 (Algebra); C4210 (Formal logic)", keywords = "$\Lambda$-confluent; algorithms; Canonical polycyclic string-rewriting system; Completion procedure; Critical pair criteria; Decidability; design; Explicit uniform algorithm; Generalized word problem; Group theory; Nilpotent string-rewriting system; Polycyclic groups; Rewrite relation; Rewriting techniques; theory", subject = "{\bf F.4.2} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Grammars and Other Rewriting Systems. {\bf I.1.0} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, General.", thesaurus = "Decidability; Group theory; Rewriting systems; Symbol manipulation", } @InProceedings{Bajaj:1989:FRP, author = "C. Bajaj and J. Canny and T. Garrity and J. 
Warren", title = "Factoring rational polynomials over the complexes", crossref = "Gonnet:1989:PAI", pages = "81--90", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p81-bajaj/", abstract = "The authors give NC algorithms for determining the number and degrees of the absolute factors (factors irreducible over the complex numbers $C$) of a multivariate polynomial with rational coefficients. NC is the class of functions computable by logspace-uniform boolean circuits of polynomial size and polylogarithmic depth. The measures of size of the input polynomial are its degree $d$, coefficient length $c$, number of variables $n$, and for sparse polynomials, the number of nonzero coefficients $s$. For the general case, the authors give a random (Monte-Carlo) NC algorithm in these input measures. If $n$ is fixed, or if the polynomial is dense, they give a deterministic NC algorithm. The algorithm also works in random NC for polynomials represented by straight-line programs, provided the polynomial can be evaluated at integer points in NC. The authors discuss a method for obtaining an approximation to the coefficients of each factor whose running time is polynomial in the size of the original (dense) polynomial. These methods rely on the fact that the connected components of a complex hypersurface $P(z_1,\ldots{},z_n)=0$ minus its singular points correspond to the absolute factors of $P$.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. 
Sci., Purdue Univ., Lafayette, IN, USA", classification = "C1110 (Algebra); C1160 (Combinatorial mathematics); C4240 (Programming and algorithm theory)", keywords = "Absolute factors; algorithms; Complex numbers; Factorisation; Functions; Logspace-uniform boolean circuits; measurement; Monte-Carlo; Multivariate polynomial; NC algorithms; Rational coefficients; Rational polynomials; Set theory; theory; verification", subject = "{\bf G.1.2} Mathematics of Computing, NUMERICAL ANALYSIS, Approximation. {\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic, Mechanical theorem proving.", thesaurus = "Approximation theory; Computability; Computational complexity; Monte Carlo methods; Polynomials; Set theory; Symbol manipulation", xxauthor = "C. Bajaj and J. Canny and R. Garrity and J. Warren", } @InProceedings{Barkatou:1989:RLS, author = "M. A. Barkatou", title = "On the reduction of linear systems of difference equations", crossref = "Gonnet:1989:PAI", pages = "1--6", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p1-barkatou/", abstract = "The author deals with linear systems of difference equations whose coefficients admit generalized factorial series representations at $z=\infty$. He gives a criterion by which a given system is determined to have a regular singularity. He gives an algorithm, implementable in a computer algebra system, which reduces in a finite number of steps the system of difference equations on an irreducible form.", acknowledgement = ack-nhfb, affiliation = "Lab. 
TIM3-IMAG, Grenoble, France", classification = "C1120 (Analysis); C4170 (Differential equations); C7310 (Mathematics)", keywords = "algorithms; Computer algebra system; Convergence; Generalized factorial series; Irreducible form; Linear difference equations; Regular singularity; theory", subject = "{\bf G.1.7} Mathematics of Computing, NUMERICAL ANALYSIS, Ordinary Differential Equations. {\bf G.1.3} Mathematics of Computing, NUMERICAL ANALYSIS, Numerical Linear Algebra, Linear systems (direct and iterative methods).", thesaurus = "Convergence; Difference equations; Linear differential equations; Mathematics computing; Matrix algebra; Series [mathematics]; Symbol manipulation", } @InProceedings{Barkatou:1989:RNA, author = "M. A. Barkatou", title = "Rational {Newton} algorithm for computing formal solutions of linear differential equations", crossref = "Gianni:1989:SAC", pages = "183--195", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Presents a new algorithm for solving linear differential equations in the neighbourhood of an irregular singular point. This algorithm is based upon the same principles as the Newton algorithm, however it has a lower cost and is more suitable for computing algebra.", acknowledgement = ack-nhfb, affiliation = "CNRS, INPG, IMAG, Grenoble, France", classification = "C1120 (Analysis); C4170 (Differential equations)", keywords = "Formal solutions; Irregular singular point; Linear differential equations; Neighbourhood; Rational Newton algorithm", thesaurus = "Linear differential equations", } @InProceedings{BoydelaTour:1989:FAS, author = "T. {Boy de la Tour} and R. 
Caferra", title = "A formal approach to some usually informal techniques used in mathematical reasoning", crossref = "Gianni:1989:SAC", pages = "402--406", year = "1989", bibdate = "Mon Dec 01 16:57:16 1997", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "One of the striking characteristics of mathematical reasoning is the contrast between the formal aspects of mathematical truth and the informal character of the ways to that truth. Among the many important and usually informal mathematical activities the authors are interested in proof analogy (i.e. common pattern between proofs of different theorems) in the context of interactive theorem proving.", acknowledgement = ack-nhfb, affiliation = "LIFIA-INPG, Grenoble, France", classification = "C4210 (Formal logic)", keywords = "Formal approach; Informal character; Interactive theorem proving; Mathematical reasoning; Mathematical truth; Usually informal techniques", thesaurus = "Theorem proving", } @InProceedings{Bradford:1989:ETC, author = "R. J. Bradford and J. H. Davenport", title = "Effective tests for cyclotomic polynomials", crossref = "Gianni:1989:SAC", pages = "244--251", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The authors present two efficient tests that determine if a given polynomial is cyclotomic, or is a product of cyclotomics. The first method uses the fact that all the roots of a cyclotomic polynomial are roots of unity, and the second the fact that the degree of a cyclotomic polynomial is a value of $\phi (n)$, for some $n$. The authors also find the cyclotomic factors of any polynomial.", acknowledgement = ack-nhfb, affiliation = "Sch. of Math. Sci., Bath Univ., UK", classification = "C4130 (Interpolation and function approximation)", keywords = "Cyclotomic polynomials; Roots", thesaurus = "Polynomials", } @InProceedings{Bradford:1989:SRD, author = "R. 
Bradford", title = "Some results on the defect", crossref = "Gonnet:1989:PAI", pages = "129--135", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p129-bradford/", abstract = "The defect of an algebraic number field (or, more correctly, of a presentation of the field) is the largest rational integer that divides the denominator of any algebraic integer in the field when written using that presentation. Knowing the defect, or estimating it accurately is extremely valuable in many algorithms, the factorization of polynomials over algebraic number fields being a prime example. The author presents a few results that are a move in the right direction.", acknowledgement = ack-nhfb, affiliation = "Sch. of Math. Sci., Bath Univ., UK", classification = "C1110 (Algebra); C1160 (Combinatorial mathematics); C4130 (Interpolation and function approximation); C4240 (Programming and algorithm theory)", keywords = "Algebraic integer; Algebraic number field; algorithms; Defect; Factorization; Polynomials; Rational integer; theory", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms. {\bf G.1.2} Mathematics of Computing, NUMERICAL ANALYSIS, Approximation. {\bf G.1.4} Mathematics of Computing, NUMERICAL ANALYSIS, Quadrature and Numerical Differentiation. {\bf G.1.9} Mathematics of Computing, NUMERICAL ANALYSIS, Integral Equations.", thesaurus = "Computation theory; Number theory; Polynomials; Symbol manipulation", } @InProceedings{Bronstein:1989:FRR, author = "M. 
Bronstein", title = "Fast reduction of the {Risch} differential equation", crossref = "Gianni:1989:SAC", pages = "64--72", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Presents a weaker definition of weak-normality which: can always be obtained in a tower of transcendental elementary extensions, and gives an explicit formula for the denominator of $y$, so the equation $y'+fy=g$ can be reduced to a polynomial equation in one reduction step. As a consequence, a new algorithm is obtained for solving y'+fy=g. The algorithm is very similar to the one described by Rothstein (1976), except that the present one uses weak normality to prevent finite cancellation, rather than having to find integer roots of polynomials over the constant field of $K$ in order to detect it.", acknowledgement = ack-nhfb, affiliation = "IBM Thomas J. Watson Res. Center, Yorktown Heights, NY, USA", classification = "C1120 (Analysis); C4170 (Differential equations)", keywords = "Denominator; Explicit formula; Fast reduction; Polynomial equation; Reduction step; Risch differential equation; Transcendental elementary extensions; Weak-normality", thesaurus = "Differential equations", } @InProceedings{Bronstein:1989:SRE, author = "M. Bronstein", title = "Simplification of real elementary functions", crossref = "Gonnet:1989:PAI", pages = "207--211", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p207-bronstein/", abstract = "The author describes an algorithm, based on Risch's real structure theorem, that determines explicitly all the algebraic relations among a given set of real elementary functions. 
He provides examples from its implementation in the scratchpad computer algebra system that illustrate the advantages over the use of complex logarithms and exponentials.", acknowledgement = ack-nhfb, affiliation = "IBM Res. Div., T. J. Watson Res. Center, Yorktown Heights, NY, USA", classification = "C1110 (Algebra); C7310 (Mathematics)", keywords = "algorithms; Computer algebra system; Real elementary functions; Real structure theorem; Scratchpad; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf G.1.7} Mathematics of Computing, NUMERICAL ANALYSIS, Ordinary Differential Equations.", thesaurus = "Functions; Mathematics computing; Symbol manipulation", } @InProceedings{Brown:1989:SPP, author = "C. Brown and G. Cooperman and L. Finkelstein", title = "Solving permutation problems using rewriting systems", crossref = "Gianni:1989:SAC", pages = "364--377", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "A new approach is described for finding short expressions for arbitrary elements of a permutation group in terms of the original generators which uses rewriting methods. This forms an important component in a long term plan to find short solutions for `large' permutation problems (such as Rubik's cube), which are difficult to solve by existing search techniques. In order for this methodology to be successful, it is important to start with a short presentation for a finite permutation group. A new method is described for giving a presentation for an arbitrary permutation group acting on $n$ letters. This can be used to show that any such permutation group has a presentation with at most $n-1$ generators and $(n-1)^2$ relations. 
As an application of this method, an $O(n^4)$ algorithm is described for determining if a set of generators for a permutation group of $n$ letters is a strong generating set (in the sense of Sims). The `back end' includes a novel implementation of the Knuth--Bendix technique on symmetrization classes for groups.", acknowledgement = ack-nhfb, affiliation = "Coll. of Comput. Sci., Northeastern Univ., Boston, MA, USA", classification = "C4210 (Formal logic)", keywords = "Knuth--Bendix technique; Permutation problems; Rewriting systems", thesaurus = "Rewriting systems", } @InProceedings{Butler:1989:CVU, author = "G. Butler and J. Cannon", title = "{Cayley}, version 4: the user language", crossref = "Gianni:1989:SAC", pages = "456--466", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Cayley, version 4, is a proposed knowledge-based system for modern algebra. The proposal integrates the existing powerful algorithm base of Cayley with modest deductive facilities and large sophisticated databases of groups and related algebraic structures. The outcome will be a revolutionary computer algebra system. The user language of Cayley, version 4, is the first stage of the project to develop a computer algebra system which integrates algorithmic, deductive, and factual knowledge. The language plays an important role in shaping the users' communication of their knowledge to the system, and in presenting the results to the user. The very survival of a system depends upon its acceptance by the users, so the language must be natural, extensible, and powerful. The major changes in the language (over version 3) are the definitions of algebraic structures, set constructors and associated control structures, the definitions of maps and homomorphisms, the provision of packages for procedural abstraction and encapsulation, database facilities, and other input/output. 
The motivation for these changes has been the need to provide facilities for a knowledge-based system; to allow sets to be defined by properties; and to remove semantic ambiguities of structure definitions.", acknowledgement = ack-nhfb, affiliation = "Sydney Univ., NSW, Australia", classification = "C6170 (Expert systems); C7310 (Mathematics)", keywords = "Algebra; Algebraic structures; Associated control structures; Cayley; Computer algebra system; Deductive facilities; Encapsulation; Factual knowledge; Homomorphisms; Knowledge-based system; Procedural abstraction; Set constructors; Sophisticated databases; User language; Version 4", thesaurus = "Knowledge based systems; Symbol manipulation", } @InProceedings{Cabay:1989:FRA, author = "S. Cabay and G. Labahn", title = "A fast, reliable algorithm for calculating {Pad{\'e}--Hermite} forms", crossref = "Gonnet:1989:PAI", pages = "95--100", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p95-cabay/", abstract = "The authors present a new fast algorithm for the calculation of a Pad{\'e}--Hermite form for a vector of power series. When the vector of power series is normal, the algorithm is shown to calculate a Pad{\'e}--Hermite form of type $(n_0, \ldots{}, n_k)$ in $O(k.(n_0^2+\ldots{} +n_k^2))$ operations. This complexity is the same as that of other fast algorithms for computing Pad{\'e}--Hermite approximants. However, unlike other algorithms, the new algorithm also succeeds in the nonnormal case, usually with only a moderate increase in cost.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. 
Sci., Alberta Univ., Edmonton, Alta., Canada", classification = "C1120 (Analysis); C4130 (Interpolation and function approximation); C4240 (Programming and algorithm theory)", keywords = "algorithms; Complexity; Iterative methods; Nonnormal case; Pad{\'e}--Hermite approximants; Pad{\'e}--Hermite forms; theory; Vector of power series", subject = "{\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems. {\bf G.1.2} Mathematics of Computing, NUMERICAL ANALYSIS, Approximation. {\bf G.1.7} Mathematics of Computing, NUMERICAL ANALYSIS, Ordinary Differential Equations. {\bf G.1.9} Mathematics of Computing, NUMERICAL ANALYSIS, Integral Equations.", thesaurus = "Computational complexity; Iterative methods; Linear differential equations; Series [mathematics]; Vectors", } @InProceedings{Canny:1989:GCP, author = "J. Canny", title = "Generalized characteristic polynomials", crossref = "Gianni:1989:SAC", pages = "293--299", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The author generalises the notion of characteristic polynomial for a system of linear equations to systems of multivariate polynomial equations. The generalization is natural in the sense that it reduces to the usual definition when all the polynomials are linear. Whereas the constant coefficient of the characteristic polynomial of a linear system is the determinant, the constant coefficient of the general characteristic polynomial is the resultant of the system. This construction is applied to solve a traditional problem with efficient methods for solving systems of polynomial equations: the presence of infinitely many solutions `at infinity'. 
The author gives a single-exponential time method for finding all the isolated solution points of a system of polynomials, even in the presence of infinitely many solutions at infinity or elsewhere.", acknowledgement = ack-nhfb, affiliation = "Div. of Comput. Sci., California Univ., Berkeley, CA, USA", classification = "C4130 (Interpolation and function approximation)", keywords = "Generalised characteristic polynomials; Multivariate polynomial equations; Single-exponential time method; System of linear equations", thesaurus = "Polynomials", } @InProceedings{Canny:1989:SSN, author = "J. F. Canny and E. Kaltofen and L. Yagati", title = "Solving systems of non-linear polynomial equations faster", crossref = "Gonnet:1989:PAI", pages = "121--128", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p121-canny/", abstract = "Finding the solution to a system of $n$ non-linear polynomial equations in $n$ unknowns over a given field, say the algebraic closure of the coefficient field, is a classical and fundamental problem in computational algebra. The authors give a method that allows the computation of resultants and $u$-resultants of polynomial systems in essentially linear space and quadratic time. The algorithm constitutes the first improvement over Gaussian elimination-based methods for computing these fundamental invariants.", acknowledgement = ack-nhfb, affiliation = "Div. of Comp. 
Sci., California Univ., Berkeley, CA, USA", classification = "C1110 (Algebra); C1120 (Analysis); C4130 (Interpolation and function approximation); C4150 (Nonlinear and functional equations); C4240 (Programming and algorithm theory)", keywords = "Algebraic closure; algorithms; Coefficient field; Computational algebra; Computational complexity; Linear space; Nonlinear polynomial equations; Quadratic time; Resultants; theory; U-resultants", subject = "{\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials. {\bf G.1.5} Mathematics of Computing, NUMERICAL ANALYSIS, Roots of Nonlinear Equations, Systems of equations. {\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms. {\bf G.1.1} Mathematics of Computing, NUMERICAL ANALYSIS, Interpolation.", thesaurus = "Computational complexity; Nonlinear equations; Polynomials; Symbol manipulation", } @InProceedings{Cantone:1989:DPE, author = "D. Cantone and V. Cutello and A. Ferro", title = "Decision procedures for elementary sublanguages of set theory. {XIV}. {Three} languages involving rank related constructs", crossref = "Gianni:1989:SAC", pages = "407--422", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The authors present three decidability results for some quantifier-free and quantified theories of sets involving rank related constructs.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. Sci., Courant Inst. of Math. Sci., New York Univ., NY, USA", classification = "C1160 (Combinatorial mathematics); C4210 (Formal logic)", keywords = "Decidability results; Decision procedures; Elementary sublanguages; Quantified theories; Quantifier-free; Rank related constructs; Set theory", thesaurus = "Decidability; Formal logic; Set theory", } @InProceedings{Caprasse:1989:CEB, author = "H. Caprasse and J. Demaret and E. 
Schr{\"u}fer", title = "Can {EXCALC} be used to investigate high-dimensional cosmological models with nonlinear {Lagrangians}?", crossref = "Gianni:1989:SAC", pages = "116--124", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Recent work in cosmology is characterized by the extension of the traditional four-dimensional general relativity models in two directions: Kaluza--Klein type models which have more than four dimensions, and models with Lagrangians containing nonlinear terms in the Riemann curvature tensor and its contractions. The package EXCALC 2 seems particularly well suited to investigate these models further. The implementation of all operations of EXTERIOR CALCULUS opens the way to perform these calculations efficiently. The article presents the current stage of investigation in this direction.", acknowledgement = ack-nhfb, affiliation = "Inst. de Phys., Liege Univ., Belgium", classification = "A9575P (Mathematical and computer techniques); A9880D (Theoretical cosmology); C7350 (Astronomy and astrophysics)", keywords = "Contractions; Cosmology; EXCALC 2; Four-dimensional general relativity models; High-dimensional cosmological models; Kaluza--Klein type models; Nonlinear Lagrangians; Package; Riemann curvature tensor", thesaurus = "Astronomy computing; Astrophysics computing; Cosmology; Software packages", } @InProceedings{ChaffyCamus:1989:ARA, author = "C. Chaffy-Camus", title = "An application of {REDUCE} to the approximation of $f(x,y)$", crossref = "Gianni:1989:SAC", pages = "73--84", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Pad{\'e} approximants are an important tool in numerical analysis, to evaluate $f(x)$ from its power series even outside the disk of convergence, or to locate its singularities. 
The paper generalizes this process to the multivariate case and presents two applications of this method: the approximation of implicit curves and the approximation of double power series. Computations are carried out on a computer algebra system REDUCE.", acknowledgement = ack-nhfb, affiliation = "TIM3-INPG, Grenoble, France", classification = "C4130 (Interpolation and function approximation); C7310 (Mathematics)", keywords = "Approximation; Computer algebra system; Convergence; Double power series; Implicit curves; Multivariate case; Numerical analysis; Pad{\'e} approximants; Reduce; Singularities", thesaurus = "Approximation theory; Convergence of numerical methods; Mathematics computing; Software packages", } @InProceedings{Char:1989:ARA, author = "B. W. Char", title = "Automatic reasoning about numerical stability of rational expressions", crossref = "Gonnet:1989:PAI", pages = "234--241", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p234-char/", abstract = "While numerical (e.g. Fortran) code generation from computer algebra is nowadays relatively easy to do, the expressions as they are produced via computer algebra typically benefit from nontrivial reformulation for efficiency and numerical stability. To assist in automatic `expert reformulation', we desire good automated tools to assess the stability of a particular form of an expression. The author discusses an approach to proofs of numerical stability (with respect to roundoff error) of rational expressions. The proof technique is based upon the ability to propagate properties such as sign, exact representability, or a certain kind of numerical stability, to floating point results from properties of their antecedents. The qualitative approach to numerical stability lends itself to implementation as a backwards-chaining theorem prover. 
While it is not a replacement for alternative forms of stability analysis, it can sometimes discover stability and explain it straightforwardly.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. Sci., Tennessee Univ., Knoxville, TN, USA", classification = "C4100 (Numerical analysis); C7310 (Mathematics)", keywords = "algorithms; Backwards-chaining theorem prover; Code generation; Computer algebra; Floating point; Numerical stability; Rational expressions; Roundoff error; theory", subject = "{\bf I.1.0} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, General. {\bf D.3.4} Software, PROGRAMMING LANGUAGES, Processors, Code generation. {\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic, Mechanical theorem proving. {\bf G.1.0} Mathematics of Computing, NUMERICAL ANALYSIS, General, Computer arithmetic.", thesaurus = "Automatic programming; Convergence of numerical methods; Mathematics computing; Symbol manipulation", } @InProceedings{Char:1989:DIC, author = "B. W. Char and A. R. Macnaughton and P. A. Strooper", title = "Discovering inequality conditions in the analytical solutions of optimization problems", crossref = "Gianni:1989:SAC", pages = "109--115", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The Kuhn--Tucker conditions can provide an analytic solution to the problem of maximizing or minimizing a function subject to inequality constraints, if the artificial variables known as Lagrange multipliers can be eliminated. The paper describes an automated reasoning program that assists in the solution process. The program may also be useful for other problems involving algebraic reasoning with inequalities.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. 
Sci., Tennessee Univ., Knoxville, TN, USA", classification = "C1180 (Optimisation techniques); C1230 (Artificial intelligence); C7310 (Mathematics)", keywords = "Algebraic reasoning; Analytic solution; Artificial variables; Automated reasoning program; Function maximization; Function minimization; Inequality conditions; Inequality constraints; Kuhn--Tucker conditions; Lagrange multipliers; Optimization problems", thesaurus = "Inference mechanisms; Mathematics computing; Optimisation", } @InProceedings{Chen:1989:CNF, author = "Guoting Chen", title = "Computing the normal forms of matrices depending on parameters", crossref = "Gonnet:1989:PAI", pages = "242--249", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p242-chen/", abstract = "The author considers an algorithm for the exact computation of the Frobenius, Jordan and Arnold's form of matrices depending holomorphically on parameters. The problem originates from the problem of formal resolution of a first order system of differential equations depending on parameter. This algorithm has been implemented in Macsyma.", acknowledgement = ack-nhfb, affiliation = "Equipe de Calcul Formel et Algorithmique Parallele, Laboratoire TIM3-IMAG, Grenoble, France", classification = "C1110 (Algebra); C1120 (Analysis); C4140 (Linear algebra); C4170 (Differential equations); C7310 (Mathematics)", keywords = "algorithms; design; Differential equations; Formal resolution; Macsyma; Matrices; Normal forms; theory", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms. {\bf G.1.7} Mathematics of Computing, NUMERICAL ANALYSIS, Ordinary Differential Equations.", thesaurus = "Differential equations; Mathematics computing; Matrix algebra; Symbol manipulation", } @InProceedings{Collins:1989:PRP, author = "G. E. Collins and J. R. 
Johnson", title = "The probability of relative primality of {Gaussian} integers", crossref = "Gianni:1989:SAC", pages = "252--258", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The authors generalize, to an arbitrary number field, the theorem which gives the probability that two integers are relatively prime. The probability that two integers are relatively prime is $ 1/ \zeta (2)$, where $\zeta$ is the Riemann $\zeta$ function and $1/\zeta(2)=6/\pi^2$. The theorem for an arbitrary number field states that the probability that two ideals are relatively prime is the reciprocal of the $\zeta$ function of the number field evaluated at two. In particular, since the Gaussian integers are a unique factorization domain, the authors get the probability that two Gaussian integers are relatively prime is $1/\zeta_G(2)$ where $\zeta_G$ is the $\zeta$ function associated with the Gaussian integers. In order to calculate the Gaussian probability, they use a theorem that enables them to factor the $\zeta$ function into a product of the Riemann $\zeta$ function and a Dirichlet series called an L-series.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. and Inf. Sci., Ohio State Univ., Columbus, OH, USA", classification = "C1140 (Probability and statistics); C1160 (Combinatorial mathematics)", keywords = "Arbitrary number field; Dirichlet series; Gaussian integers; L-series; Probability; Relative primality; Riemann $\zeta$ function", thesaurus = "Number theory; Probability", } @InProceedings{Collins:1989:QES, author = "G. E. Collins and J. R. 
Johnson", title = "Quantifier elimination and the sign variation method for real root isolation", crossref = "Gonnet:1989:PAI", pages = "264--271", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p264-collins/", abstract = "An important aspect of the construction of a cylindrical algebraic decomposition (CAD) is real root isolation. Root isolation involves finding disjoint intervals, each containing a single root, for all of the real roots of a given polynomial. Root isolation is used to construct a CAD of the real line, which serves as the base case in the construction of higher dimensional CAD's. It is also an essential part of the extension phase, which lifts an induced CAD to the next higher dimension. The authors reexamine the sign variation method of root isolation devised by Collins and Akritas (1976). A new proof of termination is given, which more accurately describes the behavior of the algorithm. This theorem is then sharpened for the special case of cubic polynomials. The result for cubic polynomials is obtained with the aid of Collins's CAD based quantifier elimination algorithm.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. and Inf. Sci., Ohio State Univ., Columbus, OH, USA", classification = "C1110 (Algebra); C4130 (Interpolation and function approximation)", keywords = "algorithms; Cubic polynomials; Cylindrical algebraic decomposition; design; Disjoint intervals; Polynomial; Quantifier elimination; Real root isolation; Sign variation method; Symbol manipulation; theory", subject = "{\bf J.6} Computer Applications, COMPUTER-AIDED ENGINEERING. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems.", thesaurus = "Polynomials; Symbol manipulation", } @InProceedings{Cooperman:1989:RGC, author = "G. Cooperman and L. 
Finkelstein and E. Luks", title = "Reduction of group constructions to point stabilizers", crossref = "Gonnet:1989:PAI", pages = "351--356", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p351-cooperman/", abstract = "The construction of point stabilizer subgroups is a problem which has been studied intensively. This work describes a general reduction of certain group constructions to the point stabilizer problem. Examples are given for the centralizer, the normal closure, and a restricted group intersection problem. For the normal closure problem, this work provides an alternative to current algorithms, which are limited by the need for repeated closures under conjugation. For the centralizer and restricted group intersection problems, one can use an existing point stabilizer sequence along with a recent base change algorithm to avoid generating a new point stabilizer sequence. This reduces the time complexity by at least an order of magnitude. Algorithms and theoretical time estimates for the special case of a small base are also summarized. An implementation is in progress.", acknowledgement = ack-nhfb, affiliation = "Coll. of Comput. Sci., Northeastern Univ., Boston, MA, USA", classification = "C1110 (Algebra); C4240 (Programming and algorithm theory)", keywords = "algorithms; Base change algorithm; Centralizer; Group constructions; Group intersection; Group theory; Normal closure; Point stabilizers; theory; Time complexity", subject = "{\bf G.2.1} Mathematics of Computing, DISCRETE MATHEMATICS, Combinatorics, Permutations and combinations. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Number-theoretic computations. 
{\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic, Mechanical theorem proving.", thesaurus = "Computational complexity; Group theory; Symbol manipulation", } @InProceedings{Deprit:1989:MPS, author = "A. Deprit and E. Deprit", title = "Massively parallel symbolic computation", crossref = "Gonnet:1989:PAI", pages = "308--316", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p308-deprit/", abstract = "A massively parallel processor proves to be a powerful tool for manipulating the very large Poisson series encountered in nonlinear dynamics. Exploiting the algebraic structure of Poisson series leads quite naturally to parallel data structures and algorithms for symbolic manipulation. Exercising the parallel symbolic processor on the solution of Kepler's equation reveals the need to reexamine the serial computational methods traditionally applied to problems in dynamics.", acknowledgement = ack-nhfb, affiliation = "Nat. Inst. of Stand. and Technol., Gaithersburg, MD, USA", classification = "C1120 (Analysis); C4240 (Programming and algorithm theory); C7310 (Mathematics)", keywords = "Algebraic structure; algorithms; design; Massively parallel processor; Nonlinear dynamics; Parallel data structures; Symbolic manipulation; theory; Very large Poisson series", subject = "{\bf F.1.2} Theory of Computation, COMPUTATION BY ABSTRACT DEVICES, Modes of Computation, Parallelism and concurrency. {\bf E.1} Data, DATA STRUCTURES. {\bf G.1.5} Mathematics of Computing, NUMERICAL ANALYSIS, Roots of Nonlinear Equations. 
{\bf C.1.3} Computer Systems Organization, PROCESSOR ARCHITECTURES, Other Architecture Styles, Stack-oriented processors**.", thesaurus = "Data structures; Mathematics computing; Nonlinear equations; Parallel algorithms; Series [mathematics]; Symbol manipulation", } @InProceedings{Devitt:1989:UCA, author = "J. S. Devitt", title = "Unleashing computer algebra on the mathematics curriculum", crossref = "Gonnet:1989:PAI", pages = "218--227", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The author presents examples of the actual use of a computer algebra system in the mathematics classroom. These methods and observations are based on the daily use of symbolic algebra during lectures. The potential for focusing student energies on the concepts and ideas of mathematical instead of just mimicking routine computations is enormous. Considerable work remains to make such tools widely accessible but the observations presented will help to make others aware of the great potential which exists for these and similar methods.", acknowledgement = ack-nhfb, affiliation = "Dept. of Math., Saskatchewan Univ., Saskatoon, Sask., Canada", classification = "C7310 (Mathematics); C7810C (Computer-aided instruction)", keywords = "Computer algebra; Educational computing; Mathematics curriculum; Symbolic algebra", thesaurus = "Educational computing; Mathematics computing; Symbol manipulation", } @InProceedings{Dewar:1989:IIS, author = "M. C. Dewar", title = "{IRENA}: an integrated symbolic and numerical computation environment", crossref = "Gonnet:1989:PAI", pages = "171--179", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Computer algebra systems provide an extremely user-friendly and natural problem-solving environment, but are comparatively slow and limited in the scope of problems they can treat. 
Programs which call routines from numerical software libraries are fast, but require longer development and testing time, as well as forcing potential users to describe their problems in what is, to them, an unnatural form. Both approaches have advantages and disadvantages, but until now it has been rather difficult to mix the two. The author describes IRENA, an interface between the computer algebra system REDUCE and the NAG numerical subroutine library, which provides the NAG user with the advantages of a computer algebra system and the REDUCE user with the facilities of an extensive library of numerical software. He discusses how the two methods could be used side-by-side to solve problems in definite integration.", acknowledgement = ack-nhfb, affiliation = "Sch. of Math. Sci., Bath Univ., UK", classification = "C4160 (Numerical integration and differentiation); C6130 (Data handling techniques); C7310 (Mathematics)", keywords = "Computer algebra system; Definite integration; IRENA; NAG; Numerical software; Numerical subroutine library; REDUCE", thesaurus = "Integration; Mathematics computing; Symbol manipulation; User interfaces", } @InProceedings{Dicrescenzo:1989:AEA, author = "C. Dicrescenzo and D. Duval", title = "Algebraic extensions and algebraic closure in {Scratchpad II}", crossref = "Gianni:1989:SAC", pages = "440--446", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Many problems in computer algebra, as well as in high-school exercises, are such that their statement only involves integers but their solution involves complex numbers. For example, the complex numbers $\sqrt{2}$ and $-\sqrt{2}$ appear in the solutions of elementary problems in various domains. The authors describe an implementation of an algebraic closure domain constructor in the language Scratchpad II. 
In the first part they analyze the problem, and in the second part they describe a solution based on the D5 system.", acknowledgement = ack-nhfb, affiliation = "TIM3, INPG, Grenoble, France", classification = "C7310 (Mathematics)", keywords = "Algebraic closure domain constructor; D5 system; Language Scratchpad II", thesaurus = "Mathematics computing; Symbol manipulation", } @InProceedings{Edelsbrunner:1989:TPS, author = "H. Edelsbrunner and F. P. Preparata and D. B. West", title = "Tetrahedrizing point sets in three dimensions", crossref = "Gianni:1989:SAC", pages = "315--331", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This paper offers combinatorial results on extremum problems concerning the number of tetrahedra in a tetrahedrization of $n$ points in general position in three dimensions, i.e. such that no four points are coplanar. It also presents an algorithm that in $O(n\log{}n)$ time constructs a tetrahedrization of a set of $n$ points consisting of at most $3n-11$ tetrahedra.", acknowledgement = ack-nhfb, affiliation = "Illinois Univ., Urbana, IL, USA", classification = "C4190 (Other numerical methods)", keywords = "Combinatorial results; Extremum problems; Tetrahedra; Tetrahedrization", thesaurus = "Computational geometry", } @InProceedings{Einwohner:1989:MPG, author = "T. H. Einwohner and R. J. Fateman", title = "A {MACSYMA} package for the generation and manipulation of {Chebyshev} series", crossref = "Gonnet:1989:PAI", pages = "180--185", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p180-einwohner/", abstract = "Techniques for a MACSYMA package for expanding an arbitrary univariate expression as a truncated series in Chebyshev polynomials and manipulating such expansions are described. 
A data structure is introduced to represent a truncated expansion in a set of orthogonal polynomials which contains the independent variable, the name of the orthogonal polynomial set, the number of terms retained, and a list of the expansion coefficients. The package converts a given expression into the aforementioned data structure. Special cases are the conversion of sums, products, the ratio, or the composition of truncated Chebyshev expansions. Another special case is converting an expression free of truncated Chebyshev expansions. The package generates exact expansion coefficients whenever possible. In addition to well-known Chebyshev expansions, such as for polynomials, the authors provide new methods for getting exact Chebyshev expansions for reciprocals of polynomials of degree one or two, meromorphic functions, arbitrary powers of a first-degree polynomial, the error-function, and generalized hypergeometric functions.", acknowledgement = ack-nhfb, affiliation = "Lawrence Livermore Lab., California Univ., CA, USA", classification = "C4130 (Interpolation and function approximation); C6120 (File organisation); C6130 (Data handling techniques); C7310 (Mathematics)", keywords = "algorithms; Chebyshev polynomials; Chebyshev series; Data structure; MACSYMA; Orthogonal polynomials; theory; Univariate expression", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf E.1} Data, DATA STRUCTURES. {\bf F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS AND PROBLEM COMPLEXITY, Numerical Algorithms and Problems, Computations on polynomials.", thesaurus = "Chebyshev approximation; Data structures; Mathematics computing; Polynomials; Series [mathematics]; Software packages; Symbol manipulation", } @InProceedings{Fateman:1989:LTR, author = "R. J. 
Fateman", title = "Lookup tables, recurrences and complexity", crossref = "Gonnet:1989:PAI", pages = "68--73", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p68-fateman/", abstract = "The use of lookup tables can reduce the complexity of calculation of functions defined typically by mathematical recurrence relations. Although this technique has been adopted by several algebraic manipulation systems, it has not been examined critically in the literature. While the use of tabulation or `memoization' seems to be particularly simple and worthwhile technique in some areas, there are some negative consequences. Furthermore, the expansion of this technique to other areas (other than recurrences) has not been subjected to analysis. The paper examines some of the assumptions.", acknowledgement = ack-nhfb, affiliation = "California Univ., Berkeley, CA, USA", classification = "C4210 (Formal logic); C4240 (Programming and algorithm theory)", keywords = "Algebraic manipulation; algorithms; Complexity; Functions; Lookup tables; Mathematical recurrence relations; theory", subject = "{\bf F.1.3} Theory of Computation, COMPUTATION BY ABSTRACT DEVICES, Complexity Measures and Classes. {\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems.", thesaurus = "Computational complexity; Number theory; Recursive functions; Symbol manipulation; Table lookup", } @InProceedings{Fateman:1989:SSA, author = "R. J. 
Fateman", title = "Series solutions of algebraic and differential equations: a comparison of linear and quadratic algebraic convergence", crossref = "Gonnet:1989:PAI", pages = "11--16", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p11-fateman/", abstract = "Speed of convergence of Newton-like iterations in an algebraic domain can be affected heavily by the increasing cost of each step, so much so that a quadratically convergent algorithm with complex steps may be comparable to a slower one with simple steps. The author gives two examples: solving algebraic and first-order ordinary differential equations using the MACSYMA algebraic manipulation system, demonstrating this phenomenon. The relevant programs are exhibited in the hope that they might give rise to more widespread application of these techniques.", acknowledgement = ack-nhfb, affiliation = "California Univ., Berkeley, CA, USA", classification = "C4130 (Interpolation and function approximation); C4170 (Differential equations); C7310 (Mathematics)", keywords = "Algebraic equations; Algebraic manipulation system; algorithms; Convergence; Differential equations; Linear algebraic convergence; MACSYMA; Newton-like iterations; Polynomials; Quadratic algebraic convergence; Series solutions; theory", subject = "{\bf G.1.7} Mathematics of Computing, NUMERICAL ANALYSIS, Ordinary Differential Equations, Boundary value problems. {\bf G.1.4} Mathematics of Computing, NUMERICAL ANALYSIS, Quadrature and Numerical Differentiation, Iterative methods. 
{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems.", thesaurus = "Convergence of numerical methods; Differential equations; Iterative methods; Mathematics computing; Polynomials; Series [mathematics]; Symbol manipulation", } @InProceedings{Fitch:1989:CRB, author = "J. Fitch", title = "Can {REDUCE} be run in parallel?", crossref = "Gonnet:1989:PAI", pages = "155--162", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p155-fitch/", abstract = "In order to make a substantial improvement in the performance of algebra systems it will eventually be necessary to use a parallel execution system. This paper considers one approach to detecting parallelism, an automatic method related to compilation, and applies it to REDUCE, and to the factoriser in particular.", acknowledgement = ack-nhfb, classification = "C6130 (Data handling techniques); C6150C (Compilers, interpreters and other processors); C7310 (Mathematics)", keywords = "Algebra systems; algorithms; Automatic method; Compilation; Factoriser; measurement; Parallel execution system; Parallelism; REDUCE", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems, REDUCE. {\bf F.1.2} Theory of Computation, COMPUTATION BY ABSTRACT DEVICES, Modes of Computation, Parallelism and concurrency. {\bf F.3.2} Theory of Computation, LOGICS AND MEANINGS OF PROGRAMS, Semantics of Programming Languages.", thesaurus = "Mathematics computing; Parallel programming; Program compilers; Symbol manipulation", } @InProceedings{Freire:1989:ASC, author = "E. Freire and E. Gamero and E. Ponce and L. G. 
Franquelo", title = "An algorithm for symbolic computation of center manifolds", crossref = "Gianni:1989:SAC", pages = "218--230", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "A useful technique for the study of local bifurcations is part of the center manifold theory because a dimensional reduction is achieved. The computation of Taylor series approximations of center manifolds gives rise to several difficulties regarding the operational complexity and the computational effort. Previous works proceed in such a way that the computational effort is not optimized. In the paper an algorithm for center manifolds well suited to symbolic computation is presented. The algorithm is organized according to an iterative scheme making good use of the previous steps, thereby minimizing the number of operations. The results of two examples obtained through a REDUCE 3.2 implementation of the algorithm are included.", acknowledgement = ack-nhfb, affiliation = "Escuela Superior Ingenieros Ind., Sevilla, Spain", classification = "C1120 (Analysis); C4130 (Interpolation and function approximation); C4170 (Differential equations); C7310 (Mathematics)", keywords = "Algorithm; Center manifold theory; Computational effort; Dimensional reduction; Iterative scheme; Local bifurcations; Operational complexity; REDUCE 3.2; Symbolic computation; Taylor series approximations", thesaurus = "Approximation theory; Differential equations; Mathematics computing; Symbol manipulation", } @InProceedings{Galligo:1989:GEC, author = "Andr\'e Galligo and Lo{\"\i}c Pottier and Carlo Traverso", title = "Greater easy common divisor and standard basis completion algorithms", crossref = "Gianni:1989:SAC", pages = "162--176", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The paper considers arithmetic complexity problems; the main problem is how 
to limit the growth of the coefficients in the algorithms and the complexity of the field operations involved. The problem is important with every ground field, with the obvious exception of finite fields.", acknowledgement = ack-nhfb, affiliation = "Nice Univ., France", classification = "C4210 (Formal logic); C4240 (Programming and algorithm theory)", keywords = "Algorithms; Arithmetic complexity problems; Coefficients; Field operations; Greater easy common divisor; Standard basis completion algorithms", thesaurus = "Computational complexity; Rewriting systems", } @InProceedings{Gaonzalez:1989:SS, author = "L. Gaonzalez and H. Lombardi and T. Recio and M.-F. Roy", title = "{Sturm--Habicht} sequence", crossref = "Gonnet:1989:PAI", pages = "136--146", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p136-gaonzalez/", acknowledgement = ack-nhfb, keywords = "algorithms; theory", subject = "{\bf G.1.9} Mathematics of Computing, NUMERICAL ANALYSIS, Integral Equations. {\bf F.1.3} Theory of Computation, COMPUTATION BY ABSTRACT DEVICES, Complexity Measures and Classes. {\bf G.1.0} Mathematics of Computing, NUMERICAL ANALYSIS, General, Parallel algorithms. {\bf G.1.0} Mathematics of Computing, NUMERICAL ANALYSIS, General, Computer arithmetic.", } @InProceedings{Geddes:1989:HMO, author = "K. O. Geddes and G. H. Gonnet and T. J. Smedley", title = "Heuristic methods for operations with algebraic numbers", crossref = "Gianni:1989:SAC", pages = "475--480", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Algorithms for doing computations involving algebraic numbers have been known for quite some time and implementations now exist in many computer algebra systems. 
Many of these algorithms have been analysed and shown to run in polynomial time and space, but in spite of this many real problems take large amounts of time and space to solve. The authors describe a heuristic method which can be used for many operations involving algebraic numbers. They give specifics for doing algebraic number inverses, and algebraic number polynomial exact division and greatest common divisor calculation. The heuristic will not solve all instances of these problems, but it returns either the correct result or with failure very quickly, and succeeds for a very large number of problems.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. Sci., Waterloo Univ., Ont., Canada", classification = "C4130 (Interpolation and function approximation); C7310 (Mathematics)", keywords = "Algebraic numbers; Heuristic methods; Polynomial", thesaurus = "Polynomials; Symbol manipulation", } @InProceedings{Geddes:1989:NAC, author = "K. O. Geddes and G. H. Gonnet", title = "A new algorithm for computing symbolic limits using hierarchical series", crossref = "Gianni:1989:SAC", pages = "490--495", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The authors describe an algorithm for computing symbolic limits, i.e. limits of expressions in symbolic form, using hierarchical series. A hierarchical series consists of two levels: an inner level which uses a simple generalization of Laurent series with finite principal part and which captures the behaviour of subexpressions without essential singularities, and an outer level which captures the essential singularities. Once such a series has been computed for an expression at a given point, the limit of the expression at the point is determined by looking at the most significant term of the series. This algorithm solves the limit problem for a large class of expressions.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. 
Sci., Waterloo Univ., Ont., Canada", classification = "C6130 (Data handling techniques); C7310 (Mathematics)", keywords = "Algorithm; Finite principal part; Hierarchical series; Laurent series; Limit problem; Singularities; Symbolic form; Symbolic limits", thesaurus = "Series [mathematics]; Symbol manipulation", } @InProceedings{Geddes:1989:RIM, author = "K. O. Geddes and L. Y. Stefanus", title = "On the {Risch--Norman} integration method and its implementation in {MAPLE}", crossref = "Gonnet:1989:PAI", pages = "212--217", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p212-geddes/", abstract = "Unlike the recursive Risch algorithm for the integration of transcendental elementary functions, the Risch--Norman method processes the tower of field extensions directly in one step. In addition to logarithmic and exponential field extensions, this method can handle extensions in terms of tangents. Consequently, it allows trigonometric functions to be treated without converting them to complex exponential form. The authors review this method and describe its implementation in MAPLE. A heuristic enhancement to this method is also presented.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. Sci., Waterloo Univ., Ont., Canada", classification = "C1110 (Algebra); C1120 (Analysis); C4160 (Numerical integration and differentiation); C7310 (Mathematics)", keywords = "algorithms; Exponential field extensions; Logarithmic field extensions; MAPLE; Risch--Norman integration; Tangents; theory; Transcendental elementary functions; Trigonometric functions", subject = "{\bf G.1.9} Mathematics of Computing, NUMERICAL ANALYSIS, Integral Equations. {\bf F.1.3} Theory of Computation, COMPUTATION BY ABSTRACT DEVICES, Complexity Measures and Classes. 
{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf G.1.3} Mathematics of Computing, NUMERICAL ANALYSIS, Numerical Linear Algebra, Linear systems (direct and iterative methods).", thesaurus = "Functions; Integration; Mathematics computing; Symbol manipulation", } @InProceedings{Gianni:1989:DA, author = "P. Gianni and V. Miller and B. Trager", title = "Decomposition of algebras", crossref = "Gianni:1989:SAC", pages = "300--308", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The authors deal with the problem of decomposing finite commutative Q-algebras as a direct product of local Q-algebras. They solve this problem by reducing it to the problem of finding a decomposition of finite algebras over finite field. They show that it is possible to define a lifting process that allows to reconstruct the answer over the rational numbers. This lifting appears to be very efficient since it is a quadratic lifting that doesn't require stepwise inversions. It is easy to see that the Berlekamp--Hensel algorithm for the factorization of polynomials is a special case of this argument.", acknowledgement = ack-nhfb, affiliation = "IBM Thomas J. Watson Res. Center, Yorktown Heights, NY, USA", classification = "C1110 (Algebra); C4190 (Other numerical methods)", keywords = "Berlekamp--Hensel algorithm; Decomposing finite commutative Q-algebras; Lifting process", thesaurus = "Algebra; Computational geometry", } @InProceedings{Giusti:1989:ATP, author = "M. Giusti and D. Lazard and A. 
Valibouze", title = "Algebraic transformations of polynomial equations, symmetric polynomials and elimination", crossref = "Gianni:1989:SAC", pages = "309--314", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The authors define a general transformation of polynomials and study the following concrete problem: how to perform such a transformation using a standard system of computer algebra, providing the usual algebraic tools.", acknowledgement = ack-nhfb, affiliation = "Centre de Math., Ecole Polytech., Palaiseau, France", classification = "C4130 (Interpolation and function approximation); C6130 (Data handling techniques); C7310 (Mathematics)", keywords = "Algebraic tools; Algebraic transformations of polynomial equations; Computer algebra; Elimination; Symmetric polynomials", thesaurus = "Polynomials; Symbol manipulation", } @InProceedings{Giusti:1989:CRC, author = "M. Giusti", title = "On the {Castelnuovo} regularity for curves", crossref = "Gonnet:1989:PAI", pages = "250--253", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p250-giusti/", abstract = "Let $k$ be a field of characteristic zero; let us consider an algebraic subvariety of the projective space $P_k^n$, defined by a homogeneous ideal I of the polynomial algebra $R=k(x_o,\ldots{},x_n)$. There exists different objects measuring the complexity of this subvariety. Some invariants are naturally intrinsic: the dimension and the degree of the subvariety, the Hilbert function and its regularity, and the Castelnuovo regularity. A natural question is to study the relationships between the integers, at least when the dimension is small (less or equal to one). 
The author gives a slightly different version of the Castelnuovo--Gruson--Lazarsfeld--Peskine theorem (1983), which relates the Castelnuovo regularity and the degree in the case of curves with more general hypotheses but unfortunately slightly weaker conclusion.", acknowledgement = ack-nhfb, affiliation = "Centre de Mathematiques, CNRS, Ecole Polytechnique, Palaiseau, France", classification = "C1110 (Algebra); C4130 (Interpolation and function approximation)", keywords = "algorithms; Castelnuovo regularity; Complexity; Curves; design; Hilbert function; measurement; Polynomial algebra; Polynomials; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf F.1.3} Theory of Computation, COMPUTATION BY ABSTRACT DEVICES, Complexity Measures and Classes.", thesaurus = "Computational complexity; Curve fitting; Polynomials", } @InProceedings{Gonzalez:1989:SS, author = "L. Gonzalez and H. Lombardi and T. Recio and M.-F. Roy", title = "{Sturm--Habicht} sequence", crossref = "Gonnet:1989:PAI", pages = "136--146", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Formal computations with inequalities is a subject of general interest in computer algebra. In particular it is fundamental in the parallelisation of basic algorithms and quantifier elimination for real closed fields. The authors give a generalisation of the Sturm theorem essentially due to Sylvester, which is the key for formal computations with inequalities. They study the subresultant sequence, precise some of the classical definitions in order to avoid problems and study specialisation properties. They introduce the Sturm--Habicht sequence, which generalizes Habicht's work (1948). 
This new sequence, obtained automatically from a subresultant sequence, has some remarkable properties: it gives the same information as the Sturm sequence, recovered by looking only at its principal coefficients; it can be computed by ring operations and exact divisions only, in polynomial time in case of integer coefficients, eventually by modular methods; it has good specialisation properties. Some information about applications and implementation of the Sturm--Habicht sequence is given.", acknowledgement = ack-nhfb, affiliation = "Dept. de Matematicas, Cantabria Univ., Spain", classification = "C1110 (Algebra); C4130 (Interpolation and function approximation); C4240 (Programming and algorithm theory)", keywords = "Computational complexity; Computer algebra; Inequalities; Integer coefficients; Modular methods; Parallelisation; Polynomial time; Quantifier elimination; Ring operations; Sturm theorem; Sturm--Habicht sequence", thesaurus = "Computational complexity; Parallel algorithms; Polynomials; Series [mathematics]; Symbol manipulation", } @InProceedings{Grigorev:1989:CCC, author = "D. Yu. Grigor'ev", title = "Complexity of computing the characters and the genre of a system of exterior differential equations", crossref = "Gianni:1989:SAC", pages = "534--543", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Let a system $(\sum_JA_{J,i}(dX_{j1},\ldots{},dX_{jm})=0)_{m,i}$ of exterior differential equations be given, where $A_{J,i}$ are polynomials in $n$ variables $X_1,\ldots{}, X_n$ of degrees less than $d$ and skew-symmetric relatively to multiindices $J=(j_1,\ldots{}, j_m)$, the square brackets denote the exterior product of the differentials $dX_{j1},\ldots{}, dX_{jm}$. E. Cartan (1945) introduced the characters and the genre $h$ of the system. 
Cauchy--Kovalevski theorem guarantees the existence of an integral manifold (and even of the general form) with the dimension less or equal to $h$ satisfying the given system. An algorithm for computing the characters and the genre is designed with the running time polynomial in $L$, $(dn)^n$, herein $L$ denotes the bit-size of the system. The algorithm involves the subexponential-time procedures for finding the irreducible components of an algebraic variety.", acknowledgement = ack-nhfb, affiliation = "Dept. of Math., V. A. Steklov Inst., Acad. of Sci., Leningrad, USSR", classification = "C4130 (Interpolation and function approximation); C4170 (Differential equations)", keywords = "Algebraic variety; Cauchy--Kovalevski theorem; Characters; Exterior differential equations; Integral manifold; Irreducible components; Polynomials", thesaurus = "Differential equations; Polynomials", } @InProceedings{Grossman:1989:LTE, author = "R. Grossman and R. G. Larson", title = "Labeled trees and the efficient computation of derivations", crossref = "Gonnet:1989:PAI", pages = "74--80", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p74-grossman/", abstract = "The paper is concerned with the effective parallel symbolic computation of operators under composition. Examples include differential operators under composition and vector fields under the Lie bracket. In general, such operators do not commute. An important problem is to find efficient algorithms to write expressions involving noncommuting operators in terms of operators which do commute. If the original expression enjoys a certain symmetry, then naive rewriting requires the computation of terms which in the end cancel. 
Previously, the authors gave an algorithm which in some cases is exponentially faster than the naive expansion of the noncommuting operators (1989). In this paper they show how that algorithm can be naturally parallelized.", acknowledgement = ack-nhfb, affiliation = "Illinois Univ., Chicago, IL, USA", classification = "C1120 (Analysis); C1160 (Combinatorial mathematics); C4210 (Formal logic); C4240 (Programming and algorithm theory)", keywords = "algorithms; Computational complexity; Data structures; Derivations; Differential operators; Labeled trees; Lie bracket; Noncommuting operators; Operators; Parallel algorithms; Parallel symbolic computation; theory; Vector fields", subject = "{\bf I.1.2} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Algorithms, Algebraic algorithms. {\bf F.1.2} Theory of Computation, COMPUTATION BY ABSTRACT DEVICES, Modes of Computation, Parallelism and concurrency.", thesaurus = "Computational complexity; Data structures; Differentiation; Parallel algorithms; Symbol manipulation; Trees [mathematics]", } @InProceedings{Hentzel:1989:VNA, author = "I. R. Hentzel and D. J. Pokrass", title = "Verification of non-identities in algebras", crossref = "Gianni:1989:SAC", pages = "496--507", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The authors present a computer assisted algorithm which establishes whether or not a proposed identity is a consequence of the defining identities of a variety of nonassociative algebras. When the nonassociative polynomial is not an identity, the algorithm produces a proof called a characteristic function. Like an ordinary counterexample, the characteristic function can be used to convince a verifier that the polynomial is not identically zero. However the characteristic function appears to be computationally easier to verify. Also, it reduces or eliminates problems with characteristic. 
The authors used this method to obtain and verify a new result in the theory of nonassociative algebras. Namely, in a free right alternative algebra $(a,a,b)^3 \ne 0$.", acknowledgement = ack-nhfb, affiliation = "Dept. of Math., Iowa State Univ., Ames, IA, USA", classification = "C7310 (Mathematics)", keywords = "Algebras; Characteristic function; Computer assisted algorithm; Nonassociative polynomial; Nonidentities verification", thesaurus = "Mathematics computing; Symbol manipulation", } @InProceedings{Juozapavicius:1989:SCW, author = "A. Juozapavicius", title = "Symbolic computation for {Witt} rings", crossref = "Gianni:1989:SAC", pages = "271--273", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The author considers bilinear and quadratic forms over polynomial rings, such that they can carry linear discrete orderings. The author defines the notion of reduced form and presents theorems concerning equivalence of forms to their reduced presentation. The proofs of these statements are based on the Buchberger's algorithms and their modifications to Gr{\"o}bner bases.", acknowledgement = ack-nhfb, affiliation = "Dept. of Math., Vilnius State Univ., Lithuanian SSR, USSR", classification = "C4130 (Interpolation and function approximation); C7310 (Mathematics)", keywords = "Bilinear forms; Symbolic computation; Witt rings; Quadratic forms; Polynomial rings; Linear discrete orderings; Reduced form; Gr{\"o}bner bases", thesaurus = "Polynomials; Symbol manipulation", } @InProceedings{Kaltofen:1989:ISM, author = "E. Kaltofen and L. Yagati", title = "Improved sparse multivariate polynomial interpolation algorithms", crossref = "Gianni:1989:SAC", pages = "467--474", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The authors consider the problem of interpolating sparse multivariate polynomials from their values. 
They discuss two algorithms for sparse interpolation, one due to Ben-Or and Tiwari (1988) and the other due to Zippel (1988). They present efficient algorithms for finding the rank of certain special Toeplitz systems arising in the Ben-Or and Tiwari algorithm and for solving transposed Vandermonde systems of equations, the use of which greatly improves the time complexities of the two interpolation algorithms.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. Sci., Rensselaer Polytech. Inst., Troy, NY, USA", classification = "C4130 (Interpolation and function approximation)", keywords = "Sparse multivariate polynomial interpolation algorithms; Time complexities; Toeplitz systems; Transposed Vandermonde systems of equations", thesaurus = "Interpolation; Polynomials", } @InProceedings{Kaltofen:1989:IVP, author = "E. Kaltofen and T. Valente and N. Yui", title = "An improved {Las Vegas} primality test", crossref = "Gonnet:1989:PAI", pages = "26--33", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p26-kaltofen/", abstract = "The authors present a modification of the Goldwasser--Kilian--Atkin primality test, which, when given an input $n$, outputs either prime or composite, along with a certificate of correctness which may be verified in polynomial time. Atkin's method computes the order of an elliptic curve whose endomorphism ring is isomorphic to the ring of integers of a given imaginary quadratic field $Q(\sqrt{-D})$. Once an appropriate order is found, the parameters of the curve are computed as a function of a root modulo $n$ of the Hilbert class equation for the Hilbert class field of $Q(\sqrt{-D})$. The modification proposed determines instead a root of the Watson class equation for $Q(\sqrt{-D})$ and applies a transformation to get a root of the corresponding Hilbert equation. 
This is a substantial improvement, in that the Watson equations have much smaller coefficients than do the Hilbert equations.", acknowledgement = ack-nhfb, affiliation = "Dept. of Comput. Sci., Rensselaer Polytech. Inst., Troy, NY, USA", classification = "C1160 (Combinatorial mathematics); C4240 (Programming and algorithm theory); C7310 (Mathematics)", keywords = "algorithms; Certificate of correctness; Elliptic curve; Endomorphism ring; Goldwasser--Kilian--Atkin primality test; Hilbert equation; Imaginary quadratic field; Las Vegas primality test; Number theory; Polynomial time; Prime number; Programming theory; theory; Watson class equation", subject = "{\bf G.1.8} Mathematics of Computing, NUMERICAL ANALYSIS, Partial Differential Equations, Hyperbolic equations. {\bf G.3} Mathematics of Computing, PROBABILITY AND STATISTICS. {\bf G.1.2} Mathematics of Computing, NUMERICAL ANALYSIS, Approximation.", thesaurus = "Computational complexity; Mathematics computing; Number theory; Program verification; Programming theory", } @InProceedings{Kirchner:1989:CER, author = "C. Kirchner and H. Kirchner", title = "Constrained equational reasoning", crossref = "Gonnet:1989:PAI", pages = "382--389", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p382-kirchner/", abstract = "The theory of constrained equational reasoning is outlined. Many questions and prolongations of this work arise.", acknowledgement = ack-nhfb, classification = "C4210 (Formal logic)", keywords = "algorithms; Constrained equational reasoning; Formal logic; Theorem proving; theory", subject = "{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems. {\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic, Logic and constraint programming. 
{\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic, Computational logic.", thesaurus = "Formal logic; Theorem proving", } @InProceedings{Kobayashi:1989:SSA, author = "H. Kobayashi and S. Moritsugu and R. W. Hogan", title = "Solving systems of algebraic equations", crossref = "Gianni:1989:SAC", pages = "139--149", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Shows an algorithm for computing all the solutions with their multiplicities of a system of algebraic equations. The algorithm previously proposed by the authors for the case where the ideal is zero-dimensional and radical seems to have practical efficiency. The authors present a new method for solving systems which are not necessarily radical. The set of all solutions is partitioned into subsets each of which consists of mutually conjugate solutions having the same multiplicity.", acknowledgement = ack-nhfb, affiliation = "Dept. of Math., Coll. of Sci. and Technol., Nihon Univ., Tokyo, Japan", classification = "C1110 (Algebra); C4210 (Formal logic)", keywords = "Algebraic equations; Algorithm; Ideal; Multiplicities; Mutually conjugate solutions; Radical; Subsets; Zero-dimensional", thesaurus = "Algebra; Problem solving; Theorem proving", } @InProceedings{Kredel:1989:SDC, author = "H. Kredel", title = "Software development for computer algebra or from {ALDES\slash SAC-2} to {WEB\slash Modula-2}", crossref = "Gianni:1989:SAC", pages = "447--455", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The author defines a new concept for developing computer algebra software. The development system will integrate a documentation system, a programming language, algorithm libraries, and an interactive calculation facility. 
The author exemplifies the workability of this concept by applying it to the well known ALDES/SAC-2 system. The ALDES Translator is modified to help in converting ALDES/SAC-2 Code to Modula-2. The implementation and module setup of the SAC-2 basic system, list processing system and arithmetic system in Modula-2 are discussed. An example gives a first idea of the performance of the system. The WEB System of Structured Documentation is used to generate documentation with {\TeX}.", acknowledgement = ack-nhfb, affiliation = "Passau Univ., West Germany", classification = "C6110B (Software engineering techniques); C7310 (Mathematics)", keywords = "ALDES/SAC-2 system; Algorithm libraries; Computer algebra software; Documentation system; Interactive calculation facility; Performance; Programming language; WEB/Modula-2", thesaurus = "Mathematics computing; Software engineering; Symbol manipulation", } @InProceedings{Kuhn:1989:MEC, author = "N. Kuhn and K. Madlener", title = "A method for enumerating cosets of a group presented by a canonical system", crossref = "Gonnet:1989:PAI", pages = "338--350", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p338-kuhn/", abstract = "The application of rewriting techniques to enumerate cosets of subgroups in groups is investigated. Given a class of groups $G$ having canonical string rewriting presentations the authors consider the GWP for this class which is defined by $GWP(w,U)$ iff $w$ in $$ for $w$ in finite $U$ contained in $G$, $G \in G$, where $$ is the subgroup of $G$ generated by $U$. 
They show how to associate to $U$ two rewriting relations $\to_U$ and $\Rightarrow_U$ on strings such that $w \in \langle U \rangle$ iff $w \to^*_U \lambda$ iff $w \Rightarrow^*_U \lambda$ ($\lambda$ the empty word), both representing the left congruence generated by $U$. They derive general critical pair criteria for confluence and $\lambda$-confluence for these relations. Using these criteria completion procedures can be constructed which enumerate cosets like the Todd--Coxeter algorithm without explicit definition of all cosets. The procedures are shown to be terminating if the index of the subgroup is finite or for groups with finite canonical monadic group presentations. If the completion procedure terminates it returns a prefix rewriting system which is confluent on $\Sigma^*$, thus deciding the GWP and the index problem for this class of groups. The normal forms of the rewriting relations form a minimal Schreier-representative system of $\langle U \rangle$ in $G$.", acknowledgement = ack-nhfb, affiliation = "Fachbereich Inf., Kaiserslautern Univ., West Germany", classification = "C1110 (Algebra); C4210 (Formal logic)", keywords = "$\lambda$-confluence; algorithms; Canonical string rewriting presentations; Completion procedures; Confluence; Cosets; Critical pair criteria; Decidability; Finite canonical monadic group presentations; Generalized word problem; Group theory; Minimal Schreier-representative system; Rewriting relations; Rewriting techniques; Subgroups; theory; Todd--Coxeter algorithm", subject = "{\bf F.4.2} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Grammars and Other Rewriting Systems. {\bf F.4.2} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Grammars and Other Rewriting Systems, Decision problems. 
{\bf I.1.3} Computing Methodologies, SYMBOLIC AND ALGEBRAIC MANIPULATION, Languages and Systems.", thesaurus = "Decidability; Group theory; Rewriting systems; Symbol manipulation", } @InProceedings{Kutzler:1989:CAT, author = "B. Kutzler", title = "Careful algebraic translations of geometry theorems", crossref = "Gonnet:1989:PAI", pages = "254--263", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p254-kutzler/", abstract = "Modern application areas like computer-aided design and robotics have revived interest in geometry. The algorithmic techniques of computer algebra are important tools for solving large classes of nonlinear geometric problems. However, their application requires a translation of geometric problems into algebraic form. So far, this algebraization process has not gained special attention, since it was considered `obvious'. In the context of automated geometry theorem proving, the use of algebraic deduction techniques led to very promising results, but it seemed to change the nature of proof problems from deciding the validity of a theorem to finding nondegeneracy conditions under which the theorem holds. A careful analysis shows, that this is mainly due to the `careless' translation method. A careful translation technique is presented that resolves this defect. The usefulness of the new algebraization method is demonstrated on concrete examples. A practical comparison with the former `careless' translation is done.", acknowledgement = ack-nhfb, affiliation = "Res. Inst. 
for Symbolic Comput., Johannes Kepler Univ., Linz, Austria", classification = "C1160 (Combinatorial mathematics); C4190 (Other numerical methods); C4210 (Formal logic); C4290 (Other computer theory); C7310 (Mathematics)", keywords = "Algebraic deduction; algorithms; Automated geometry theorem proving; Computer algebra; experimentation; Geometry theorems; Nonlinear geometric problems; theory", subject = "{\bf I.2.0} Computing Methodologies, ARTIFICIAL INTELLIGENCE, General. {\bf G.2.1} Mathematics of Computing, DISCRETE MATHEMATICS, Combinatorics.", thesaurus = "Computational geometry; Symbol manipulation; Theorem proving", } @InProceedings{MacCallum:1989:ODE, author = "M. A. H. MacCallum", title = "An ordinary differential equation solver for {REDUCE}", crossref = "Gianni:1989:SAC", pages = "196--205", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Progress and plans for the implementation of an ordinary differential equation solver in REDUCE 3.3 are reported; the aim is to incorporate the best available methods for obtaining closed-form solutions, and to aim at the `best possible' alternative when this fails. It is hoped that this will become a part of the standard REDUCE program library. Elementary capabilities have already been implemented, i.e. methods for first order differential equations of simple types and linear equations of any order with constant coefficients. The further methods to be used include: for first-order equations, an adaptation of Shtokhamer's MACSYMA program; for higher-order linear equations, factorisation of the operator where possible; and for nonlinear equations, the exploitation of Lie symmetries.", acknowledgement = ack-nhfb, affiliation = "Sch. of Math. 
Sci., Queen Mary Coll., London, UK", classification = "C1120 (Analysis); C4170 (Differential equations); C7310 (Mathematics)", keywords = "Closed-form solutions; Factorisation; First-order equations; Lie symmetries; MACSYMA program; Nonlinear equations; Ordinary differential equation solver; REDUCE 3.3; REDUCE program library", thesaurus = "Differential equations; Mathematics computing; Software packages; Subroutines", } @InProceedings{Menezes:1989:SCA, author = "A. J. Menezes and P. C. {van Oorschot} and S. A. Vanstone", title = "Some computational aspects of root finding in ${GF}(q^m)$", crossref = "Gianni:1989:SAC", pages = "259--270", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This paper is an implementation report comparing several variations of a deterministic algorithm for finding roots of polynomials in finite extension fields. Running times for problem instances in fields $\mbox{GF}(2^m)$, including $m>1000$, are given. Comparisons are made between the variations, and improvements achieved in running times are discussed.", acknowledgement = ack-nhfb, affiliation = "Waterloo Univ., Ont., Canada", classification = "C4130 (Interpolation and function approximation)", keywords = "Computational aspects; Root finding; Roots of polynomials", thesaurus = "Polynomials", } @InProceedings{Miller:1989:PGE, author = "B. R. 
Miller", title = "A program generator for efficient evaluation of {Fourier} series", crossref = "Gonnet:1989:PAI", pages = "199--206", year = "1989", bibdate = "Thu Mar 12 08:33:50 MST 1998", bibsource = "http://www.acm.org/pubs/toc/; http://www.math.utah.edu/pub/tex/bib/issac.bib", URL = "http://www.acm.org:80/pubs/citations/proceedings/issac/74540/p199-miller/", abstract = "Many fields require the evaluation of large multi-variate Fourier series, but the naive method of calling sine and cosine for each term can be prohibitive where computing resources are constrained or the series are extremely large (30000 terms). Although the number of such calls can be reduced by using trigonometric identities, such a reduction is usually not possible by hand. Indeed, even when it is carried out by computer, care must be taken to generate compact programs and avoid generating large numbers of intermediate terms. The author describes an algorithm for automatically generating very efficient Fortran programs directly from the mathematical description of the series to be evaluated. The resulting Fortran programs are 5-7 times faster than the naive version and sometimes significantly more compact.", acknowledgement = ack-nhfb, affiliation = "Nat. Inst. of Stand. and Technol., Gaithersbury, MD, USA", classification = "C6115 (Programming support); C7310 (Mathematics)", keywords = "algorithms; design; Fortran programs; Fourier series; languages; Program generator", subject = "{\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical Logic, Computability theory. {\bf D.3.4} Software, PROGRAMMING LANGUAGES, Processors, Code generation. {\bf D.3.3} Software, PROGRAMMING LANGUAGES, Language Constructs and Features, Procedures, functions, and subroutines.", thesaurus = "Automatic programming; Mathematics computing; Series [mathematics]; Symbol manipulation", } @InProceedings{Mora:1989:GBN, author = "T. 
Mora", title = "{Gr{\"o}bner} bases in noncommutative algebras", crossref = "Gianni:1989:SAC", pages = "150--161", year = "1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The author has studied, in 1988, the concept of standard and Gr{\"o}bner bases and algorithms for their computation in a very wide algebraic context (graded structures). It is easy to show that if $R=k/H$, where $H$ is the ideal generated by $(X_jX_j-c_{ij}X_iX_j-p_{ij})$ and $\deg(p_{ij})<\deg(X_iX_j)$ for each $i,j$, then $R$ is such a graded structure; so his previous techniques can be applied to it in order to define a concept of Gr{\"o}bner basis and to produce an algorithm for their computation, provided that if $J$ is the ideal generated by $(X_jX_i-c_{ij}X_iX_j:i$, homogeneous for the graduation defined above and containing J, is finitely generated; (2) For each homogeneous ideal $(h_1, \ldots{}, h_s)$ in $k/J$, it is possible to compute a finite set of syzygies, which together with the trivial ones, generate the module of syzygies; and (3) For each homogeneous ideal $(h_1, \ldots{}, h_s)$ and each homogeneous element $h$ in $k/J$, it is possible to decide whether $h$ in $(h_1,\ldots{},h_s)$, in which case it is possible to compute a representation of $h$ in terms of $(h_1,\ldots{},h_s)$. It turns out that the above conditions hold whenever for no $i_1 + n \log (D))) $, where $ N_1 $ is the number of nonzero entries of a multiplication matrix. This almost matches the complexity of computing the minimal polynomial of one multiplication matrix. Then, we address the general case and give corresponding complexity results. Our algorithm is dynamic in the sense that it selects automatically which strategy to use depending on the input. 
Its key ingredients are the Wiedemann algorithm to handle $1$-dimensional linear recurrence (for the shape position case), and the Berlekamp--Massey--Sakata algorithm from Coding Theory to handle multi-dimensional linearly recurring sequences in the general case.", acknowledgement = ack-nhfb, } @InProceedings{Giesbrecht:2011:DII, author = "Mark Giesbrecht and Daniel S. Roche", title = "Diversification improves interpolation", crossref = "Schost:2011:IPI", pages = "123--130", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993909", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We consider the problem of interpolating an unknown multivariate polynomial with coefficients taken from a finite field or as numerical approximations of complex numbers. Building on the recent work of Garg and Schost, we improve on the best-known algorithm for interpolation over large finite fields by presenting a Las Vegas randomized algorithm that uses fewer black box evaluations. Using related techniques, we also address numerical interpolation of sparse polynomials with complex coefficients, and provide the first provably stable algorithm (in the sense of relative error) for this problem, at the cost of modestly more evaluations. A key new technique is a randomization which makes all coefficients of the unknown polynomial distinguishable, producing what we call a diverse polynomial. Another departure from most previous approaches is that our algorithms do not rely on root finding as a subroutine. 
We show how these improvements affect the practical performance with trial implementations.", acknowledgement = ack-nhfb, } @InProceedings{Greuet:2011:DRI, author = "Aur{\'e}lien Greuet and Mohab Safey {El Din}", title = "Deciding reachability of the infimum of a multivariate polynomial", crossref = "Schost:2011:IPI", pages = "131--138", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993910", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Let $ f \in Q[X_1, \ldots{}, X_n] $ be of degree $D$. Algorithms for solving the unconstrained global optimization problem $ f^* = \inf_{x \in R^n} f(x) $ are of first importance since this problem appears frequently in numerous applications in engineering sciences. This can be tackled by either designing appropriate quantifier elimination algorithms or by certifying lower bounds on $ f^* $ by means of sums of squares decompositions but there is no efficient algorithm for deciding if $ f^* $ is a minimum. This paper is dedicated to this important problem. We design a probabilistic algorithm that decides, for a given $f$ and the corresponding $ f^* $, if $ f^* $ is reached over $ R^n $ and computes a point $ x^* \in R^n $ such that $ f(x^*) = f^* $ if such a point exists. This algorithm makes use of algebraic elimination algorithms and real root isolation. If $L$ is the length of a straight-line program evaluating $f$, algebraic elimination steps run in $ O(\log (D - 1) n^6 (n L + n^4) U ((D - 1)^{n + 1})^3) $ arithmetic operations in $Q$ where $ D = \deg (f) $ and $ U(x) = x (\log (x))^2 \log \log (x) $. 
Experiments show its practical efficiency.", acknowledgement = ack-nhfb, } @InProceedings{Guo:2011:ACS, author = "Leilei Guo and Feng Liu", title = "An algorithm for computing set-theoretic generators of an algebraic variety", crossref = "Schost:2011:IPI", pages = "139--146", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993911", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Based on Eisenbud's idea (see [Eisenbud, D., Evans, G., 1973. \booktitle{Every algebraic set in $n$-space is the intersection of $n$ hypersurfaces}. Invent. Math. 19, 107--112]), we present an algorithm for computing set-theoretic generators for any algebraic variety in the affine $n$-space, which consists of at most $n$ polynomials. With minor modifications, this algorithm is also valid for projective algebraic variety in projective $n$-space.", acknowledgement = ack-nhfb, } @InProceedings{Guo:2011:RPL, author = "Li Guo and William Y. Sit and Ronghua Zhang", title = "On {Rota}'s problem for linear operators in associative algebras", crossref = "Schost:2011:IPI", pages = "147--154", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993912", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "A long standing problem of Gian-Carlo Rota for associative algebras is the classification of all linear operators that can be defined on them. In the 1970s, there were only a few known operators, for example, the derivative operator, the difference operator, the average operator and the Rota--Baxter operator. A few more appeared after Rota posed his problem. However, little progress was made to solve this problem in general. In part, this is because the precise meaning of the problem is not so well understood. 
In this paper, we propose a formulation of the problem using the framework of operated algebras and viewing an associative algebra with a linear operator as one that satisfies a certain operated polynomial identity. To narrow our focus more on the operators that Rota was interested in, we further consider two particular classes of operators, namely, those that generalize differential or Rota--Baxter operators. With the aid of computer algebra, we are able to come up with a list of these two classes of operators, and provide some evidence that these lists may be complete. Our search have revealed quite a few new operators of these types whose properties are expected to be similar to the differential operator and Rota--Baxter operator respectively. Recently, a more unified approach has emerged in related areas, such as difference algebra and differential algebra, and Rota--Baxter algebra and Nijenhuis algebra. The similarities in these theories can be more efficiently explored by advances on Rota's problem.", acknowledgement = ack-nhfb, } @InProceedings{Gupta:2011:CHF, author = "Somit Gupta and Arne Storjohann", title = "Computing {Hermite} forms of polynomial matrices", crossref = "Schost:2011:IPI", pages = "155--162", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993913", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This paper presents a new algorithm for computing the Hermite form of a polynomial matrix. Given a nonsingular $ n \times n $ matrix $A$ filled with degree $d$ polynomials with coefficients from a field, the algorithm computes the Hermite form of $A$ using an expected number of $ (n^3 d)^{1 + o(1)} $ field operations. This is the first algorithm that is both softly linear in the degree $d$ and softly cubic in the dimension $n$. 
The algorithm is randomized of the Las Vegas type.", acknowledgement = ack-nhfb, } @InProceedings{Hart:2011:PPF, author = "William Hart and Mark van Hoeij and Andrew Novocin", title = "Practical polynomial factoring in polynomial time", crossref = "Schost:2011:IPI", pages = "163--170", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993914", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "State of the art factoring in $ Q[x] $ is dominated in theory by a combinatorial reconstruction problem while, excluding some rare polynomials, performance tends to be dominated by Hensel lifting. We present an algorithm which gives a practical improvement (less Hensel lifting) for these more common polynomials. In addition, factoring has suffered from a 25 year complexity gap because the best implementations are much faster in practice than their complexity bounds. We illustrate that this complexity gap can be closed by providing an implementation which is comparable to the best current implementations and for which competitive complexity results can be proved.", acknowledgement = ack-nhfb, } @InProceedings{Kaltofen:2011:QTC, author = "Erich L. Kaltofen and Michael Nehring and B. David Saunders", title = "Quadratic-time certificates in linear algebra", crossref = "Schost:2011:IPI", pages = "171--176", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993915", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present certificates for the positive semidefiniteness of an $n$ by $n$ matrix $A$, whose entries are integers of binary length $ \log || A || $, that can be verified in $ O(n^{(2 + \mu)} (\log || A ||)^{(1 + \mu)}) $ binary operations for any $ \mu > 0 $. 
The question arises in Hilbert\slash Artin-based rational sum-of-squares certificates (proofs) for polynomial inequalities with rational coefficients. We allow certificates that are validated by Monte Carlo randomized algorithms, as in Rusins Freivalds's famous 1979 quadratic time certification for the matrix product. Our certificates occupy $ O(n^{(3 + \mu)} (\log || A ||)^{(1 + \mu)}) $ bits, from which the verification algorithm randomly samples a quadratic amount. In addition, we give certificates of the same space and randomized validation time complexity for the Frobenius form, which includes the characteristic and minimal polynomial. For determinant and rank we have certificates of essentially-quadratic binary space and time complexity via Storjohann's algorithms.", acknowledgement = ack-nhfb, } @InProceedings{Kaltofen:2011:SBB, author = "Erich L. Kaltofen and Michael Nehring", title = "Supersparse black box rational function interpolation", crossref = "Schost:2011:IPI", pages = "177--186", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993916", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present a method for interpolating a supersparse blackbox rational function with rational coefficients, for example, a ratio of binomials or trinomials with very high degree. We input a blackbox rational function, as well as an upper bound on the number of non-zero terms and an upper bound on the degree. The result is found by interpolating the rational function modulo a small prime $p$, and then applying an effective version of Dirichlet's Theorem on primes in an arithmetic progression progressively lift the result to larger primes. Eventually we reach a prime number that is larger than the inputted degree bound and we can recover the original function exactly. 
In a variant, the initial prime $p$ is large, but the exponents of the terms are known modulo larger and larger factors of $ p - 1 $. The algorithm, as presented, is conjectured to be polylogarithmic in the degree, but exponential in the number of terms. Therefore, it is very effective for rational functions with a small number of non-zero terms, such as the ratio of binomials, but it quickly becomes ineffective for a high number of terms. The algorithm is oblivious to whether the numerator and denominator have a common factor. The algorithm will recover the sparse form of the rational function, rather than the reduced form, which could be dense. We have experimentally tested the algorithm in the case of under 10 terms in numerator and denominator combined and observed its conjectured high efficiency.", acknowledgement = ack-nhfb, } @InProceedings{Kaminski:2011:UDC, author = "Jeremy-Yrmeyahu Kaminski and Yann Sepulcre", title = "Using discriminant curves to recover a surface of {$ P^4 $} from two generic linear projections", crossref = "Schost:2011:IPI", pages = "187--192", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993917", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We study how an irreducible smooth and closed algebraic surface X embedded in CP$^4$, can be recovered using its projections from two points onto embedded projective hyperplanes. The different embeddings are unknown. The only input is the defining equation of each projected surface. We show how both the embeddings and the surface in CP$^4$ can be recovered modulo some action of the group of projective transformations of CP$^4$. We show how in a generic situation, a characteristic matrix of the pair of embeddings can be recovered. Then we use this matrix to recover the class of the couple of maps and as a consequence to recover the surface. 
For a generic situation, two projections define a surface with two irreducible components. One component has degree d (d -1) and the other has degree d, being the original surface.", acknowledgement = ack-nhfb, } @InProceedings{Kapur:2011:CCG, author = "Deepak Kapur and Yao Sun and Dingkang Wang", title = "Computing comprehensive {Gr{\"o}bner} systems and comprehensive {Gr{\"o}bner} bases simultaneously", crossref = "Schost:2011:IPI", pages = "193--200", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993918", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In Kapur et al (ISSAC, 2010), a new method for computing a comprehensive Gr{\"o}bner system of a parameterized polynomial system was proposed and its efficiency over other known methods was effectively demonstrated. Based on those insights, a new approach is proposed for computing a comprehensive Gr{\"o}bner basis of a parameterized polynomial system. The key new idea is not to simplify a polynomial under various specialization of its parameters, but rather keep track in the polynomial, of the power products whose coefficients vanish; this is achieved by partitioning the polynomial into two parts- nonzero part and zero part for the specialization under consideration. During the computation of a comprehensive Gr{\"o}bner system, for a particular branch corresponding to a specialization of parameter values, nonzero parts of the polynomials dictate the computation, i.e., computing S-polynomials as well as for simplifying a polynomial with respect to other polynomials; but the manipulations on the whole polynomials (including their zero parts) are also performed. Gr{\"o}bner basis computations on such pairs of polynomials can also be viewed as Gr{\"o}bner basis computations on a module. 
Once a comprehensive Gr{\"o}bner system is generated, both nonzero and zero parts of the polynomials are collected from every branch and the result is a faithful comprehensive Gr{\"o}bner basis, to mean that every polynomial in a comprehensive Gr{\"o}bner basis belongs to the ideal of the original parameterized polynomial system. This technique should be applicable to other algorithms for computing a comprehensive Gr{\"o}bner system as well, thus producing both a comprehensive Gr{\"o}bner system as well as a faithful comprehensive Gr{\"o}bner basis of a parameterized polynomial system simultaneously. The approach is exhibited by adapting the recently proposed method for computing a comprehensive Gr{\"o}bner system in (ISSAC, 2010) for computing a comprehensive Gr{\"o}bner basis. The timings on a collection of examples demonstrate that this new algorithm for computing comprehensive Gr{\"o}bner bases has better performance than other existing algorithms.", acknowledgement = ack-nhfb, } @InProceedings{Kauers:2011:CT, author = "Manuel Kauers", title = "The concrete tetrahedron", crossref = "Schost:2011:IPI", pages = "7--8", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993892", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We give an overview over computer algebra algorithms for dealing with symbolic sums, recurrence equations, generating functions, and asymptotic estimates, and we will illustrate how to apply these algorithms to problems arising in discrete mathematics.", acknowledgement = ack-nhfb, } @InProceedings{Kauers:2011:RDB, author = "Manuel Kauers and Carsten Schneider", title = "A refined denominator bounding algorithm for multivariate linear difference equations", crossref = "Schost:2011:IPI", pages = "201--208", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993919", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; 
http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We continue to investigate which polynomials can possibly occur as factors in the denominators of rational solutions of a given partial linear difference equation. In an earlier article we have introduced the distinction between periodic and aperiodic factors in the denominator, and we have given an algorithm for predicting the aperiodic ones. Now we extend this technique towards the periodic case and present a refined algorithm which also finds most of the periodic factors.", acknowledgement = ack-nhfb, } @InProceedings{Kerber:2011:ERR, author = "Michael Kerber and Michael Sagraloff", title = "Efficient real root approximation", crossref = "Schost:2011:IPI", pages = "209--216", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993920", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We consider the problem of approximating all real roots of a square-free polynomial f. Given isolating intervals, our algorithm refines each of them to a width at most $ 2^{-L} $, that is, each of the roots is approximated to $L$ bits after the binary point. Our method provides a certified answer for arbitrary real polynomials, only requiring finite approximations of the polynomial coefficient and choosing a suitable working precision adaptively. In this way, we get a correct algorithm that is simple to implement and practically efficient. Our algorithm uses the quadratic interval refinement method; we adapt that method to be able to cope with inaccuracies when evaluating $f$, without sacrificing its quadratic convergence behavior. We prove a bound on the bit complexity of our algorithm in terms of degree, coefficient size and discriminant. 
Our bound improves previous work on integer polynomials by a factor of $ \deg f $ and essentially matches best known theoretical bounds on root approximation which are obtained by very sophisticated algorithms.", acknowledgement = ack-nhfb, } @InProceedings{Li:2011:APF, author = "Yue Li and Gabriel {Dos Reis}", title = "An automatic parallelization framework for algebraic computation systems", crossref = "Schost:2011:IPI", pages = "233--240", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993923", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This paper proposes a non-intrusive automatic parallelization framework for typeful and property-aware computer algebra systems. Automatic parallelization remains a promising computer program transformation for exploiting ubiquitous concurrency facilities available in modern computers. The framework uses semantics-based static analysis to extract reductions in library components based on algebraic properties. An early implementation shows up to 5 times speed-up for library functions and homotopy-based polynomial system solver. 
The general framework is applicable to algebraic computation systems and programming languages with advanced type systems that support user-defined axioms or annotation systems.", acknowledgement = ack-nhfb, } @InProceedings{Li:2011:ARS, author = "Hongbo Li and Ruiyong Sun and Shoubin Yao and Ge Li", title = "Approximate rational solutions torational {ODEs} defined on discrete differentiable curves", crossref = "Schost:2011:IPI", pages = "217--224", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993921", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/maple-extract.bib", abstract = "In this paper, a new concept is proposed for discrete differential geometry: discrete n-differentiable curve, which is a tangent n-jet on a sequence of space points. A complete method is proposed to solve ODEs of the form n$^{(m)} = F(r, r', \ldots {}, r^{(n)}, n, n', \ldots {}, n^{(m - 1)}, u) / G (r, r', \ldots {}, r^{(n)}, n, n', \ldots {}, n^{(m - 1)}, u)$, where $F$, $G$ are respectively vector-valued and scalar-valued polynomials, where $r$ is a discrete curve obtained by sampling along an unknown smooth curve parametrized by $u$, and where $n$ is the vector field to be computed along the curve. Our Maple-13 program outputs an approximate rational solution with the highest order of approximation for given data and neighborhood size. The method is used to compute rotation minimizing frames of space curves in CAGD. For one-step backward-forward chasing, a 6th-order approximate rational solution is found, and 6 is guaranteed to be the highest order of approximation by rational functions. 
The theoretical order of approximation is also supported by numerical experiments.", acknowledgement = ack-nhfb, } @InProceedings{Li:2011:SDR, author = "Wei Li and Xiao-Shan Gao and Cum-Ming Yuan", title = "Sparse differential resultant", crossref = "Schost:2011:IPI", pages = "225--232", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993922", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper, the concept of sparse differential resultant for a differentially essential system of differential polynomials is introduced and its properties are proved. In particular, a degree bound for the sparse differential resultant is given. Based on the degree bound, an algorithm to compute the sparse differential resultant is proposed, which is single exponential in terms of the order, the number of variables, and the size of the differentially essential system.", acknowledgement = ack-nhfb, } @InProceedings{Ma:2011:MRG, author = "Yue Ma and Lihong Zhi", title = "The minimum-rank gram matrix completion via modified fixed point continuation method", crossref = "Schost:2011:IPI", pages = "241--248", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993924", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The problem of computing a representation for a real polynomial as a sum of minimum number of squares of polynomials can be casted as finding a symmetric positive semidefinite real matrix of minimum rank subject to linear equality constraints. In this paper, we propose algorithms for solving the minimum-rank Gram matrix completion problem, and show the convergence of these algorithms. Our methods are based on the fixed point continuation method. 
We also use the Barzilai--Borwein technique and a specific linear combination of two previous iterates to accelerate the convergence of modified fixed point continuation algorithms. We demonstrate the effectiveness of our algorithms for computing approximate and exact rational sum of squares decompositions of polynomials with rational coefficients.", acknowledgement = ack-nhfb, } @InProceedings{Mantzaflaris:2011:DCI, author = "Angelos Mantzaflaris and Bernard Mourrain", title = "Deflation and certified isolation of singular zeros of polynomial systems", crossref = "Schost:2011:IPI", pages = "249--256", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993925", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We develop a new symbolic-numeric algorithm for the certification of singular isolated points, using their associated local ring structure and certified numerical computations. An improvement of an existing method to compute inverse systems is presented, which avoids redundant computation and reduces the size of the intermediate linear systems to solve. We derive a one-step deflation technique, from the description of the multiplicity structure in terms of differentials. The deflated system can be used in Newton-based iterative schemes with quadratic convergence. Starting from a polynomial system and a sufficiently small neighborhood, we obtain a criterion for the existence and uniqueness of a singular root of a given multiplicity structure, applying a well-chosen symbolic perturbation. Standard verification methods, based e.g. on interval arithmetic and a fixed point theorem, are employed to certify that there exists a unique perturbed system with a singular root in the domain. 
Applications to topological degree computation and to the analysis of real branches of an implicit curve illustrate the method.", acknowledgement = ack-nhfb, } @InProceedings{Mayr:2011:SEG, author = "Ernst W. Mayr and Stephan Ritscher", title = "Space-efficient {Gr{\"o}bner} basis computation without degree bounds", crossref = "Schost:2011:IPI", pages = "257--264", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993926", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The computation of a Gr{\"o}bner basis of a polynomial ideal is known to be exponential space complete. We revisit the algorithm by K{\"u}hnle and Mayr using recent improvements of various degree bounds. The result is an algorithm which is exponential in the ideal dimension (rather than the number of indeterminates). Furthermore, we provide an incremental version of the algorithm which is independent of the knowledge of degree bounds. Employing a space-efficient implementation of Buchberger's S-criterion, the algorithm can be implemented such that the space requirement depends on the representation and Gr{\"o}bner basis degrees of the problem instance (instead of the worst case) and thus is much lower in average.", acknowledgement = ack-nhfb, } @InProceedings{Miller:2011:CAE, author = "Victor S. Miller", title = "Computational aspects of elliptic curves and modular forms", crossref = "Schost:2011:IPI", pages = "1--2", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993888", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The ultimate motivation for much of the study of Number Theory is the solution of Diophantine Equations --- finding integer solutions to systems of equations. Elliptic curves comprise a large, and important class of such equations. 
Throughout the history of their study Elliptic Curves have always had a strong algorithmic component. In the early 1960's Birch and Swinnerton-Dyer developed systematic algorithms to automate a generalization of a procedure called ``descent'' which went back to Fermat. The data they obtained was instrumental in formulating their famous conjecture, which is now one of the Clay Mathematical Institute's Millenium prizes.", acknowledgement = ack-nhfb, } @InProceedings{Moody:2011:DPJ, author = "Dustin Moody", title = "Division polynomials for {Jacobi} quartic curves", crossref = "Schost:2011:IPI", pages = "265--272", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993927", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper we find division polynomials for Jacobi quartics. These curves are an alternate model for elliptic curves to the more common Weierstrass equation. Division polynomials for Weierstrass curves are well known, and the division polynomials we find are analogues for Jacobi quartics. Using the division polynomials, we show recursive formulas for the n -th multiple of a point on the quartic curve. As an application, we prove a type of mean-value theorem for Jacobi quartics. 
These results can be extended to other models of elliptic curves, namely, Jacobi intersections and Huff curves.", acknowledgement = ack-nhfb, } @InProceedings{Nagasaka:2011:CSG, author = "Kosaku Nagasaka", title = "Computing a structured {Gr{\"o}bner} basis approximately", crossref = "Schost:2011:IPI", pages = "273--280", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993928", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "There are several preliminary definitions for a Gr{\"o}bner basis with inexact input since computing such a basis is one of the challenging problems in symbolic-numeric computations for several decades. A structured Gr{\"o}bner basis is such a basis defined from the data mining point of view: how to extract a meaningful result from the given inexact input when the amount of noise is not small or we do not have enough information about the input. However, the known algorithm needs a suitable (unknown) information on terms required for a variant of the Buchberger algorithm. In this paper, we introduce an improved version of the algorithm that does not need any extra information in advance.", acknowledgement = ack-nhfb, } @InProceedings{Pan:2011:RPM, author = "Victor Y. Pan and Guoliang Qian and Ai-Long Zheng", title = "Randomized preconditioning of the {MBA} algorithm", crossref = "Schost:2011:IPI", pages = "281--288", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993929", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "MBA algorithm inverts a structured matrix in nearly linear arithmetic time but requires a serious restriction on the input class. 
We remove this restriction by means of randomization and extend the progress to some fundamental computations with polynomials, e.g., computing their GCDs and AGCDs, where most effective known algorithms rely on computations with matrices having Toeplitz-like structure. Furthermore, our randomized algorithms fix rank deficiency and ill conditioning of general and structured matrices. At the end we comment on a wide range of other natural extensions of our progress and underlying ideas.", acknowledgement = ack-nhfb, } @InProceedings{Pospelov:2011:FFT, author = "Alexey Pospelov", title = "{Fast Fourier Transforms} over poor fields", crossref = "Schost:2011:IPI", pages = "289--296", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993930", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present a new algebraic algorithm for computing the discrete Fourier transform over arbitrary fields. It computes DFTs of infinitely many orders $n$ in $ O(n \log n) $ algebraic operations, while the complexity of a straightforward application of the known FFT algorithms can be $ \Omega (n^{1.5}) $ for such $n$. Our algorithm is a novel combination of the classical FFT algorithms, and is never slower than any of the latter. As an application we come up with an efficient way of computing DFTs of high orders in finite field extensions which can further boost polynomial multiplication algorithms. We relate the complexities of the DFTs of such orders with the complexity of polynomial multiplication.", acknowledgement = ack-nhfb, } @InProceedings{Sagraloff:2011:SEE, author = "Michael Sagraloff and Chee K. 
Yap", title = "A simple but exact and efficient algorithm for complex root isolation", crossref = "Schost:2011:IPI", pages = "353--360", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993938", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present a new exact subdivision algorithm CEVAL for isolating the complex roots of a square-free polynomial in any given box. It is a generalization of a previous real root isolation algorithm called EVAL. Under suitable conditions, our approach is applicable for general analytic functions. CEVAL is based on the simple Bolzano Principle and is easy to implement exactly. Preliminary experiments have shown its competitiveness. We further show that, for the ``benchmark problem'' of isolating all roots of a square-free polynomial with integer coefficients, the asymptotic complexity of both algorithms EVAL and CEVAL matches (up a logarithmic term) that of more sophisticated real root isolation methods which are based on Descartes' Rule of Signs, Continued Fraction or Sturm sequence. In particular, we show that the tree size of EVAL matches that of other algorithms. Our analysis is based on a novel technique called \Delta -clusters from which we expect to see further applications.", acknowledgement = ack-nhfb, } @InProceedings{Sarkar:2011:NRR, author = "Soumojit Sarkar and Arne Storjohann", title = "Normalization of row reduced matrices", crossref = "Schost:2011:IPI", pages = "297--304", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993931", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This paper gives a deterministic algorithm to transform a row reduced matrix to canonical Popov form. 
Given as input a row reduced matrix $R$ over $ K[x] $, $ K a $ field, our algorithm computes the Popov form in about the same time as required to multiply together over $ K[x] $ two matrices of the same dimension and degree as $R$. We also show that the problem of transforming a row reduced matrix to Popov form is at least as hard as polynomial matrix multiplication.", acknowledgement = ack-nhfb, } @InProceedings{Saunders:2011:NSE, author = "B. David Saunders and David Harlan Wood and Bryan S. Youse", title = "Numeric-symbolic exact rational linear system solver", crossref = "Schost:2011:IPI", pages = "305--312", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993932", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "An iterative refinement approach is taken to rational linear system solving. Such methods produce, for each entry of the solution vector, a rational approximation with denominator a power of 2. From this the correct rational entry can be reconstructed. Our iteration is a numeric-symbolic hybrid in that it uses an approximate numeric solver at each step together with a symbolic (exact arithmetic) residual computation and symbolic rational reconstruction. The rational solution may be checked symbolically (exactly). However, there is some possibility of failure of convergence, usually due to numeric ill-conditioning. Alternatively, the algorithm may be used to obtain an extended precision floating point approximation of any specified precision. In this case we cannot guarantee the result by rational reconstruction and an exact solution check, but the approach gives evidence (not proof) that the probability of error is extremely small. 
The chief contributions of the method and implementation are (1) confirmed continuation, (2) improved rational reconstruction, and (3) faster and more robust performance.", acknowledgement = ack-nhfb, } @InProceedings{She:2011:AAA, author = "Zhikun She and Bai Xue and Zhiming Zheng", title = "Algebraic analysis on asymptotic stability of continuous dynamical systems", crossref = "Schost:2011:IPI", pages = "313--320", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993933", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper we propose a mechanisable technique for asymptotic stability analysis of continuous dynamical systems. We start from linearizing a continuous dynamical system, solving the Lyapunov matrix equation and then check whether the solution is positive definite. For the cases that the Jacobian matrix is not a Hurwitz matrix, we first derive an algebraizable sufficient condition for the existence of a Lyapunov function in quadratic form without linearization. Then, we apply a real root classification based method step by step to formulate this derived condition as a semi-algebraic set such that the semi-algebraic set only involves the coefficients of the pre-assumed quadratic form. Finally, we compute a sample point in the resulting semi-algebraic set for the coefficients resulting in a Lyapunov function. In this way, we avoid the use of generic quantifier elimination techniques for efficient computation. We prototypically implemented our algorithm based on DISCOVERER. 
The experimental results and comparisons demonstrate the feasibility and promise of our approach.", acknowledgement = ack-nhfb, } @InProceedings{Strzebonski:2011:URR, author = "Adam Strzebonski and Elias Tsigaridas", title = "Univariate real root isolation in an extension field", crossref = "Schost:2011:IPI", pages = "321--328", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993934", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/mathematica.bib", abstract = "We present algorithmic, complexity and implementation results for the problem of isolating the real roots of a univariate polynomial in $ B_{\alpha} \in L [y] $, where $ L = Q \alpha $ is a simple algebraic extension of the rational numbers. We revisit two approaches for the problem. In the first approach, using resultant computations, we perform a reduction to a polynomial with integer coefficients and we deduce a bound of $ O_B(N^{10}) $ for isolating the real roots of $ B_\alpha $, where $N$ is an upper bound on all the quantities (degree and bitsize) of the input polynomials. In the second approach we isolate the real roots working directly on the polynomial of the input. We compute improved separation bounds for the roots and we prove that they are optimal, under mild assumptions. For isolating the real roots we consider a modified Sturm algorithm, and a modified version of Descartes' algorithm introduced by Sagraloff. For the former we prove a complexity bound of $ O_B(N^8) $ and for the latter a bound of $ O_B(N^7) $. We implemented the algorithms in C as part of the core library of Mathematica and we illustrate their efficiency over various data sets. 
Finally, we present complexity for the general case of the first approach, where the coefficients belong to multiple extensions.", acknowledgement = ack-nhfb, } @InProceedings{Sturm:2011:VSU, author = "Thomas Sturm and Ashish Tiwari", title = "Verification and synthesis using real quantifier elimination", crossref = "Schost:2011:IPI", pages = "329--336", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993935", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present the application of real quantifier elimination to formal verification and synthesis of continuous and switched dynamical systems. Through a series of case studies, we show how first-order formulas over the reals arise when formally analyzing models of complex control systems. Existing off-the-shelf quantifier elimination procedures are not successful in eliminating quantifiers from many of our benchmarks. We therefore automatically combine three established software components: virtual substitution based quantifier elimination in Reduce/Redlog, cylindrical algebraic decomposition implemented in Qepcad, and the simplifier Slfq implemented on top of Qepcad. 
We use this combination to successfully analyze various models of systems including adaptive cruise control in automobiles, adaptive flight control system, and the classical inverted pendulum problem studied in control theory.", acknowledgement = ack-nhfb, } @InProceedings{Sun:2011:GCS, author = "Yao Sun and Dingkang Wang", title = "A generalized criterion for signature related {Gr{\"o}bner} basis algorithms", crossref = "Schost:2011:IPI", pages = "337--344", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993936", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "A generalized criterion for signature related algorithms to compute Gr{\"o}bner basis is proposed in this paper. Signature related algorithms are a popular kind of algorithms for computing Gr{\"o}bner basis, including the famous F5 algorithm, the F5C algorithm, the extended F5 algorithm and the GVW algorithm. The main purpose of current paper is to study in theory what kind of criteria is correct in signature related algorithms and provide a generalized method to develop new criteria. For this purpose, a generalized criterion is proposed. The generalized criterion only relies on a general partial order defined on a set of polynomials. When specializing the partial order to appropriate specific orders, the generalized criterion can specialize to almost all existing criteria of signature related algorithms. For admissible partial orders, a proof is presented for the correctness of the algorithm that is based on this generalized criterion. And the partial orders implied by the criteria of F5 and GVW are also shown to be admissible in this paper. 
More importantly, the generalized criterion provides an effective method to check whether a new criterion is correct as well as to develop new criteria for signature related algorithms.", acknowledgement = ack-nhfb, } @InProceedings{Szanto:2011:HSN, author = "Agnes Szanto", title = "Hybrid symbolic-numeric methods for the solution of polynomial systems: tutorial overview", crossref = "Schost:2011:IPI", pages = "9--10", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993893", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this tutorial we will focus on the solution of polynomial systems given with inexact coefficients using hybrid symbolic-numeric methods. In particular, we will concentrate on systems that are over-constrained or have roots with multiplicities. These systems are considered ill-posed or ill-conditioned by traditional numerical methods and they try to avoid them. On the other hand, traditional symbolic methods are not designed to handle inexactness. Ill-conditioned polynomial equation systems arise very frequently in many important applications areas such as geometric modeling, computer vision, fluid dynamics, etc.", acknowledgement = ack-nhfb, } @InProceedings{vanHoeij:2011:GS, author = "Mark van Hoeij and J{\"u}rgen Kl{\"u}ners and Andrew Novocin", title = "Generating subfields", crossref = "Schost:2011:IPI", pages = "345--352", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993937", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Given a field extension K/k of degree n we are interested in finding the subfields of K containing k. There can be more than polynomially many subfields. We introduce the notion of generating subfields, a set of up to n subfields whose intersections give the rest. 
We provide an efficient algorithm which uses linear algebra in k or lattice reduction along with factorization. Our implementation shows that previously difficult cases can now be handled.", acknowledgement = ack-nhfb, } @InProceedings{Villard:2011:RPL, author = "Gilles Villard", title = "Recent progress in linear algebra and lattice basis reduction", crossref = "Schost:2011:IPI", pages = "3--4", year = "2011", DOI = "https://doi.org/10.1145/1993886.1993889", bibdate = "Fri Mar 14 12:20:08 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "A general goal concerning fundamental linear algebra problems is to reduce the complexity estimates to essentially the same as that of multiplying two matrices (plus possibly a cost related to the input and output sizes). Among the bottlenecks one usually finds the questions of designing a recursive approach and mastering the sizes of the intermediately computed data. In this talk we are interested in two special cases of lattice basis reduction. We consider bases given by square matrices over $ K[x] $ or $Z$, with, respectively, the notion of reduced form and LLL reduction. Our purpose is to introduce basic tools for understanding how to generalize the Lehmer and Knuth--Sch{\"o}nhage gcd algorithms for basis reduction. Over $ K[x] $ this generalization is a key ingredient for giving a basis reduction algorithm whose complexity estimate is essentially that of multiplying two polynomial matrices. Such a problem relation between integer basis reduction and integer matrix multiplication is not known. The topic receives a lot of attention, and recent results on the subject show that there might be room for progressing on the question.", acknowledgement = ack-nhfb, } @InProceedings{Abramov:2012:VMS, author = "S. A. Abramov and D. E. 
Khmelnov", title = "On valuations of meromorphic solutions of arbitrary-order linear difference systems with polynomial coefficients", crossref = "vanderHoeven:2012:IPI", pages = "12--19", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442836", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Algorithms for computing lower bounds on valuations (e.g., orders of the poles) of the components of meromorphic solutions of arbitrary-order linear difference systems with polynomial coefficients are considered. In addition to algorithms based on ideas which have been already utilized in computer algebra for treating normal first-order systems, a new algorithm using tropical calculations is proposed. It is shown that the latter algorithm is rather fast, and produces the bounds with good accuracy.", acknowledgement = ack-nhfb, } @InProceedings{Adrovic:2012:CPS, author = "Danko Adrovic and Jan Verschelde", title = "Computing {Puiseux} series for algebraic surfaces", crossref = "vanderHoeven:2012:IPI", pages = "20--27", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442837", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper we outline an algorithmic approach to compute Puiseux series expansions for algebraic sets. The series expansions originate at the intersection of the algebraic set with as many coordinate planes as the dimension of the algebraic set. Our approach starts with a polyhedral method to compute cones of normal vectors to the Newton polytopes of the given polynomial system that defines the algebraic set. If as many vectors in the cone as the dimension of the algebraic set define an initial form system that has isolated solutions, then those vectors are potential tropisms for the initial term of the Puiseux series expansion. 
Our preliminary methods produce exact representations for solution sets of the cyclic $n$-roots problem, for $n = m^2$, corresponding to a result of Backelin.", acknowledgement = ack-nhfb, } @InProceedings{Albrecht:2012:MLD, author = "Martin R. Albrecht", title = "The {M4RIE} library for dense linear algebra over small fields with even characteristic", crossref = "vanderHoeven:2012:IPI", pages = "28--34", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442838", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We describe algorithms and implementations for linear algebra with dense matrices over $ F_{2^e} $ for $ 2 \leq e \leq 10 $. Our main contributions are: (1) a specialisation of precomputation tables to $ F_{2^e} $, called Newton--John tables in this work, to avoid scalar multiplications in Gaussian elimination and matrix multiplication, (2) an efficient implementation of Karatsuba-style multiplication for matrices over extension fields of $ F_2 $ and (3) a description of an open-source library --- called M4RIE --- providing the fastest known implementation of dense linear algebra over $ F_{2^e} $ with $ 2 \leq e \leq 10 $.", acknowledgement = ack-nhfb, } @InProceedings{Barkatou:2012:CCF, author = "M. A. Barkatou and T. Cluzeau and C. {El Bacha} and J.-A. Weil", title = "Computing closed form solutions of integrable connections", crossref = "vanderHoeven:2012:IPI", pages = "43--50", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442840", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/maple-extract.bib", abstract = "We present algorithms for computing rational and hyperexponential solutions of linear $D$-finite partial differential systems written as integrable connections.
We show that these types of solutions can be computed recursively by adapting existing algorithms handling ordinary linear differential systems. We provide an arithmetic complexity analysis of the algorithms that we develop. A Maple implementation is available and some examples and applications are given.", acknowledgement = ack-nhfb, } @InProceedings{Barkatou:2012:SLO, author = "Moulay A. Barkatou and Clemens G. Raab", title = "Solving linear ordinary differential systems in hyperexponential extensions", crossref = "vanderHoeven:2012:IPI", pages = "51--58", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442841", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Let F be a differential field generated from the rational functions over some constant field by one hyperexponential extension. We present an algorithm to compute the solutions in $F^n$ of systems of $n$ first-order linear ODEs. Solutions in $F$ of a scalar ODE of higher order can be determined by an algorithm of Bronstein and Fredet. Our approach avoids reduction to the scalar case. We also give examples to show how this can be applied to integration.", acknowledgement = ack-nhfb, } @InProceedings{Berthomieu:2012:RPA, author = "J{\'e}r{\'e}my Berthomieu and Romain Lebreton", title = "Relaxed $p$-adic {Hensel} lifting for algebraic systems", crossref = "vanderHoeven:2012:IPI", pages = "59--66", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442842", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In a previous article [1], an implementation of lazy p -adic integers with a multiplication of quasi-linear complexity, the so-called relaxed product, was presented. Given a ring $R$ and an element $p$ in $R$, we design a relaxed Hensel lifting for algebraic systems from $R / (p)$ to the $p$-adic completion $R_p$ of $R$. 
Thus, any root of linear and algebraic regular systems can be lifted with a quasi-optimal complexity. We report our implementations in C++ within the computer algebra system Mathemagix and compare them with Newton operator. As an application, we solve linear systems over the integers and compare the running times with Linbox and IML.", acknowledgement = ack-nhfb, } @InProceedings{Bettale:2012:SPS, author = "Luk Bettale and Jean-Charles Faug{\`e}re and Ludovic Perret", title = "Solving polynomial systems over finite fields: improved analysis of the hybrid approach", crossref = "vanderHoeven:2012:IPI", pages = "67--74", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442843", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The Polynomial System Solving (PoSSo) problem is a fundamental NP-Hard problem in computer algebra. Among others, PoSSo have applications in area such as coding theory and cryptology. Typically, the security of multivariate public-key schemes (MPKC) such as the UOV cryptosystem of Kipnis, Shamir and Patarin is directly related to the hardness of PoSSo over finite fields. The goal of this paper is to further understand the influence of finite fields on the hardness of PoSSo. To this end, we consider the so-called hybrid approach. This is a polynomial system solving method dedicated to finite fields proposed by Bettale, Faug{\`e}re and Perret (Journal of Mathematical Cryptography, 2009). The idea is to combine exhaustive search with Gr{\"o}bner bases. The efficiency of the hybrid approach is related to the choice of a trade-off between the two methods. We propose here an improved complexity analysis dedicated to quadratic systems. Whilst the principle of the hybrid approach is simple, its careful analysis leads to rather surprising and somehow unexpected results. We prove that the optimal trade-off (i.e. 
number of variables to be fixed) allowing to minimize the complexity is achieved by fixing a number of variables proportional to the number of variables of the system considered, denoted $n$. Under some natural algebraic assumption, we show that the asymptotic complexity of the hybrid approach is $ 2^{(3.31 - 3.62 \log_2 (q)^{-1}) n} $, where $q$ is the size of the field (under the condition in particular that $ \log (q) \ll n $). This is to date, the best complexity for solving PoSSo over finite fields (when $ q > 2 $). We have been able to quantify the gain provided by the hybrid approach compared to a direct Gr{\"o}bner basis method. For quadratic systems, we show (assuming a natural algebraic assumption) that this gain is exponential in the number of variables. Asymptotically, the gain is $ 2^{1.49 n} $ when both $n$ and $q$ grow to infinity and $ \log (q) \ll n $.", acknowledgement = ack-nhfb, } @InProceedings{Beukers:2012:HFC, author = "Frits Beukers", title = "{$A$}-hypergeometric functions: computational aspects", crossref = "vanderHoeven:2012:IPI", pages = "1--2", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442830", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", acknowledgement = ack-nhfb, } @InProceedings{Biasse:2012:PTA, author = "Jean-Fran{\c{c}}ois Biasse and Claus Fieker", title = "A polynomial time algorithm for computing the {HNF} of a module over the integers of a number field", crossref = "vanderHoeven:2012:IPI", pages = "75--82", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442844", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present a variation of the modular algorithm for computing the Hermite Normal Form of an $O_K$-module presented by Cohen [4], where $O_K$ is the ring of integers of a number field $K$.
An approach presented in [4] based on reductions modulo ideals was conjectured to run in polynomial time by Cohen, but so far, no such proof was available in the literature. In this paper, we present a modification of the approach of [4] to prevent the coefficient swell and we rigorously assess its complexity with respect to the size of the input and the invariants of the field $K$.", acknowledgement = ack-nhfb, } @InProceedings{Biscani:2012:PSP, author = "Francesco Biscani", title = "Parallel sparse polynomial multiplication on modern hardware architectures", crossref = "vanderHoeven:2012:IPI", pages = "83--90", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442845", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/hash.bib; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present a high performance algorithm for the parallel multiplication of sparse multivariate polynomials on modern computer architectures. The algorithm is built on three main concepts: a cache-friendly hash table implementation for the storage of polynomial terms in distributed form, a statistical method for the estimation of the size of the multiplication result, and the use of Kronecker substitution as a homomorphic hash function. The algorithm achieves high performance by promoting data access patterns that favour temporal and spatial locality of reference. 
We present benchmarks comparing our algorithm to routines of other computer algebra systems, both in sequential and parallel mode.", acknowledgement = ack-nhfb, } @InProceedings{Blankertz:2012:CCD, author = "Raoul Blankertz and Joachim von zur Gathen and Konstantin Ziegler", title = "Compositions and collisions at degree $ p^2 $", crossref = "vanderHoeven:2012:IPI", pages = "91--98", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442846", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "A univariate polynomial $f$ over a field is decomposable if $ f = g \circ h = g (h) $ for nonlinear polynomials $g$ and $h$. In order to count the decomposables, one wants to know the number of equal-degree collisions of the form $ f = g \circ h = g^* \circ h^* $ with $ (g, h) \neq (g^*, h^*) $ and $ \deg g = \deg g^* $. Such collisions only occur in the wild case, where the field characteristic $p$ divides $ \deg f $. Reasonable bounds on the number of decomposables over a finite field are known, but they are less sharp in the wild case, in particular for degree $ p^2 $. We provide a classification of all polynomials of degree $ p^2 $ with a collision. It yields the exact number of decomposable polynomials of degree $ p^2 $ over a finite field of characteristic $p$.
We also present an algorithm that determines whether a given polynomial of degree $ p^2 $ has a collision or not.", acknowledgement = ack-nhfb, } @InProceedings{Bostan:2012:FCC, author = "Alin Bostan and Fr{\'e}d{\'e}ric Chyzak and Bruno Salvy and Ziming Li", title = "Fast computation of common left multiples of linear ordinary differential operators", crossref = "vanderHoeven:2012:IPI", pages = "99--106", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442847", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We study tight bounds and fast algorithms for LCLMs of several linear differential operators with polynomial coefficients. We analyse the arithmetic complexity of existing algorithms for LCLMs, as well as the size of their outputs. We propose a new algorithm that recasts the LCLM computation in a linear algebra problem on a polynomial matrix. This algorithm yields sharp bounds on the coefficient degrees of the LCLM, improving by one order of magnitude the best bounds obtained using previous algorithms. The complexity of the new algorithm is almost optimal, in the sense that it nearly matches the arithmetic size of the output.", acknowledgement = ack-nhfb, } @InProceedings{Bostan:2012:PSS, author = "Alin Bostan and Bruno Salvy and Muhammad F. I. Chowdhury and {\'E}ric Schost and Romain Lebreton", title = "Power series solutions of singular $ (q) $-differential equations", crossref = "vanderHoeven:2012:IPI", pages = "107--114", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442848", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We provide algorithms computing power series solutions of a large class of differential or $q$-differential equations or systems. 
Their number of arithmetic operations grows linearly with the precision, up to logarithmic terms.", acknowledgement = ack-nhfb, } @InProceedings{Bournez:2012:CSI, author = "Olivier Bournez and Daniel S. Gra{\c{c}}a and Amaury Pouly", title = "On the complexity of solving initial value problems", crossref = "vanderHoeven:2012:IPI", pages = "115--121", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442849", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper we prove that computing the solution of an initial-value problem $ y' = p(y) $ with initial condition $ y (t_0) = y_0 \in R^d $ at time $ t_0 + T $ with precision $ 2^{- \mu } $ where $p$ is a vector of polynomials can be done in time polynomial in the value of $T$, $ \mu $ and $ Y = [{\rm equation}] $. Contrary to existing results, our algorithm works over any bounded or unbounded domain. Furthermore, we do not assume any Lipschitz condition on the initial-value problem.", acknowledgement = ack-nhfb, } @InProceedings{Chen:2012:ODC, author = "Shaoshi Chen and Manuel Kauers", title = "Order-degree curves for hypergeometric creative telescoping", crossref = "vanderHoeven:2012:IPI", pages = "122--129", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442850", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Creative telescoping applied to a bivariate proper hypergeometric term produces linear recurrence operators with polynomial coefficients, called telescopers. We provide bounds for the degrees of the polynomials appearing in these operators. Our bounds are expressed as curves in the $ (r, d) $-plane which assign to every order $r$ a bound on the degree $d$ of the telescopers.
These curves are hyperbolas, which reflect the phenomenon that higher order telescopers tend to have lower degree, and vice versa.", acknowledgement = ack-nhfb, } @InProceedings{Chen:2012:TRA, author = "Shaoshi Chen and Manuel Kauers and Michael F. Singer", title = "Telescopers for rational and algebraic functions via residues", crossref = "vanderHoeven:2012:IPI", pages = "130--137", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442851", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We show that the problem of constructing telescopers for rational functions of $ m + 1 $ variables is equivalent to the problem of constructing telescopers for algebraic functions of $m$ variables and we present a new algorithm to construct telescopers for algebraic functions of two variables. These considerations are based on analyzing the residues of the input. According to experiments, the resulting algorithm for rational functions of three variables is faster than known algorithms, at least in some examples of combinatorial interest. The algorithm for algebraic functions implies a new bound on the order of the telescopers.", acknowledgement = ack-nhfb, } @InProceedings{Comer:2012:SPI, author = "Matthew T. Comer and Erich L. Kaltofen and Cl{\'e}ment Pernet", title = "Sparse polynomial interpolation and {Berlekamp\slash Massey} algorithms that correct outlier errors in input values", crossref = "vanderHoeven:2012:IPI", pages = "138--145", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442852", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We propose algorithms performing sparse interpolation with errors, based on Prony's--Ben-Or's {\&} Tiwari's algorithm, using a Berlekamp/Massey algorithm with early termination. 
First, we present an algorithm that can recover a $t$-sparse polynomial $f$ from a sequence of values, where some of the values are wrong, spoiled by either random or misleading errors. Our algorithm requires bounds $ T \geq t $ and $ E \geq e $, where $e$ is the number of evaluation errors. It interpolates $ f(\omega^i) $ for $ i = 1, \ldots {}, 2 T (E + 1) $, where $ \omega $ is a field element at which each non-zero term evaluates distinctly. We also investigate the problem of recovering the minimal linear generator from a sequence of field elements that are linearly generated, but where again $ e \leq E $ elements are erroneous. We show that there exist sequences of $ < 2 t (2 e + 1) $ elements, such that two distinct generators of length $t$ satisfy the linear recurrence up to $e$ faults, at least if the field has characteristic $ \neq 2 $. Uniqueness can be proven (for any field characteristic) for length $ \geq 2 t (2 e + 1) $ of the sequence with $e$ errors. Finally, we present the Majority Rule Berlekamp/Massey algorithm, which can recover the unique minimal linear generator of degree $t$ when given bounds $ T \geq t $ and $ E \geq e $ and the initial sequence segment of $ 2 T (2 E + 1) $ elements. Our algorithm also corrects the sequence segment. The Majority Rule algorithm yields a unique sparse interpolant for the first problem. The algorithms are applied to sparse interpolation algorithms with numeric noise, into which we now can bring outlier errors in the values.", acknowledgement = ack-nhfb, } @InProceedings{Elsheikh:2012:FCS, author = "Mustafa Elsheikh and Mark Giesbrecht and Andy Novocin and B.
David Saunders", title = "Fast computation of {Smith} forms of sparse matrices over local rings", crossref = "vanderHoeven:2012:IPI", pages = "146--153", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442853", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present algorithms to compute the Smith Normal Form of matrices over two families of local rings. The algorithms use the black-box model which is suitable for sparse and structured matrices. The algorithms depend on a number of tools, such as matrix rank computation over finite fields, for which the best-known time- and memory-efficient algorithms are probabilistic. For an $ n \times n $ matrix $A$ over the ring $ F[z] / (f^e) $, where $ f^e $ is a power of an irreducible polynomial $ f \in F[z] $ of degree $d$, our algorithm requires $ O(\eta d e^2 n) $ operations in $F$, where our black-box is assumed to require $ O(\eta) $ operations in $F$ to compute a matrix-vector product by a vector over $ F[z] / (f^e) $ (and $ \eta $ is assumed greater than $ n d e $). The algorithm only requires additional storage for $ O(n d e) $ elements of $F$. In particular, if $ \eta = O(n d e) $, then our algorithm requires only $ O(n^2 d^2 e^3) $ operations in $F$, which is an improvement on known dense methods for small $d$ and $e$. For the ring $ Z / p^e Z $, where $p$ is a prime, we give an algorithm which is time- and memory-efficient when the number of nontrivial invariant factors is small. We describe a method for dimension reduction while preserving the invariant factors. The time complexity is essentially linear in $ \mu n r e \log p $, where $ \mu $ is the number of operations in $ Z / p Z $ to evaluate the black-box (assumed greater than $n$) and $r$ is the total number of non-zero invariant factors. 
To avoid the practical cost of conditioning, we give a Monte Carlo certificate, which at low cost, provides either a high probability of success or a proof of failure. The quest for a time- and memory-efficient solution without restrictions on the number of nontrivial invariant factors remains open. We offer a conjecture which may contribute toward that end.", acknowledgement = ack-nhfb, } @InProceedings{Emeliyanenko:2012:CSB, author = "Pavel Emeliyanenko and Michael Sagraloff", title = "On the complexity of solving a bivariate polynomial system", crossref = "vanderHoeven:2012:IPI", pages = "154--161", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442854", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We study the complexity of computing the real solutions of a bivariate polynomial system using the recently presented algorithm Bisolve [2]. Bisolve is an elimination method which, in a first step, projects the solutions of a system onto the $x$- and $y$-axes and, then, selects the actual solutions from the so induced candidate set. However, unlike similar algorithms, Bisolve requires no genericity assumption on the input, and there is no need for any kind of coordinate transformation. Furthermore, extensive benchmarks as presented in [2] confirm that the algorithm is highly practical, that is, a corresponding C++ implementation in Cgal outperforms state of the art approaches by a large factor. In this paper, we focus on the theoretical complexity of Bisolve. 
For two polynomials $ f, g \in Z[x, y] $ of total degree at most n with integer coefficients bounded by $ 2^\tau $, we show that Bisolve computes isolating boxes for all real solutions of the system $ f = g = 0 $ using $ O(n^8 + n^7 \tau) $ bit operations, thereby improving the previous record bound for the same task by several magnitudes.", acknowledgement = ack-nhfb, } @InProceedings{Faugere:2012:CPG, author = "Jean-Charles Faug{\`e}re and Mohab Safey {El Din} and Pierre-Jean Spaenlehauer", title = "Critical points and {Gr{\"o}bner} bases: the unmixed case", crossref = "vanderHoeven:2012:IPI", pages = "162--169", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442855", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We consider the problem of computing critical points of the restriction of a polynomial map to an algebraic variety. This is of first importance since the global minimum of such a map is reached at a critical point. Thus, these points appear naturally in non-convex polynomial optimization which occurs in a wide range of scientific applications (control theory, chemistry, economics,\ldots{}). Critical points also play a central role in recent algorithms of effective real algebraic geometry. Experimentally, it has been observed that Gr{\"o}bner basis algorithms are efficient to compute such points. Therefore, recent software based on the so-called Critical Point Method are built on Gr{\"o}bner bases engines. Let $ f_1, \ldots {}, f_p $ be polynomials in $ Q[x_1, \ldots {}, x_n] $ of degree $D$, $ V \subset C^n $ be their complex variety and $ \pi_1 $ be the projection map $ (x_1, \ldots {}, x_n) \to x_1 $. The critical points of the restriction of $ \pi_1 $ to $V$ are defined by the vanishing of $ f_1, \ldots {}, f_p $ and some maximal minors of the Jacobian matrix associated to $ f_1, \ldots {}, f_p $. 
Such a system is algebraically structured: the ideal it generates is the sum of a determinantal ideal and the ideal generated by $ f_1, \ldots {}, f_p $. We provide the first complexity estimates on the computation of Gr{\"o}bner bases of such systems defining critical points. We prove that under genericity assumptions on $ f_1, \ldots {}, f_p $, the complexity is polynomial in the generic number of critical points, i.e. $ D^p(D - 1)^{n - p} {n - 1 \choose p - 1} $. More particularly, in the quadratic case $ D = 2 $, the complexity of such a Gr{\"o}bner basis computation is polynomial in the number of variables $n$ and exponential in $p$. We also give experimental evidence supporting these theoretical results.", acknowledgement = ack-nhfb, } @InProceedings{Faugere:2012:SPS, author = "Jean-Charles Faug{\`e}re and Jules Svartz", title = "Solving polynomial systems globally invariant under an action of the symmetric group and application to the equilibria of {$N$} vortices in the plane", crossref = "vanderHoeven:2012:IPI", pages = "170--178", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442856", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We propose an efficient algorithm to solve polynomial systems of which equations are globally invariant under an action of the symmetric group $G_N$ acting on the variable $x_i$ with $ \sigma (x_i) = x_{\sigma(i)} $ and the number of variables is a multiple of $N$. For instance, we can assume that swapping two variables (or two pairs of variables) in one equation gives rise to another equation of the system (perhaps changing the sign). The idea is to apply many times divided difference operators to the original system in order to obtain a new system of equations involving only the symmetric functions of a subset of the variables.
The next step is to solve the system using Gr{\"o}bner techniques; this is usually several orders faster than computing the Gr{\"o}bner basis of the original system since the number of solutions of the corresponding ideal, which is always finite, has been divided by at least $N!$. To illustrate the algorithm and to demonstrate its efficiency, we apply the method to a well known physical problem called equilibria positions of vortices. This problem has been studied for almost 150 years and goes back to works by von Helmholtz and Lord Kelvin. Assuming that all vortices have same vorticity, the problem can be reformulated as a system of polynomial equations invariant under an action of $G_N$. Using numerical methods, physicists have been able to compute solutions up to $ N \leq 7 $ but it was an open challenge to check whether the set of solution is complete. Direct naive approach of Gr{\"o}bner bases techniques give rise to hard-to-solve polynomial system: for instance, when $ N = 5 $, it takes several days to compute the Gr{\"o}bner basis and the number of solutions is 2060. By contrast, applying the new algorithm to the same problem gives rise to a system of 17 solutions that can be solved in less than 0.1 sec. Moreover, we are able to compute all equilibria when $ N \leq 7 $.", acknowledgement = ack-nhfb, } @InProceedings{Garcia:2012:RIA, author = "Maria Emilia Alonso Garcia and Andr{\'e} Galligo", title = "A root isolation algorithm for sparse univariate polynomials", crossref = "vanderHoeven:2012:IPI", pages = "35--42", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442839", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/maple-extract.bib", abstract = "We consider a univariate polynomial $f$ with real coefficients having a high degree $N$ but a rather small number $ d + 1 $ of monomials, with $ d \ll N $.
Such a sparse polynomial has a number of real root smaller or equal to $d$. Our target is to find for each real root of $f$ an interval isolating this root from the others. The usual subdivision methods, relying either on Sturm sequences or M{\"o}bius transform followed by Descartes' rule of sign, destruct the sparse structure. Our approach relies on the generalized Budan--Fourier theorem of Coste, Lajous, Lombardi, Roy [8] and the techniques developed in Galligo [12]. To such a $f$ is associated a set of $ d + 1 $ $F$-derivatives. The Budan=-Fourier function $ V_f(x) $ counts the sign changes in the sequence of $F$-derivatives of the $f$ evaluated at $x$. The values at which this function jumps are called the $F$-virtual roots of $f$, these include the real roots of $f$. We also consider the augmented $F$-virtual roots of $f$ and introduce a genericity property which eases our study. We present a real root isolation method and an algorithm which has been implemented in Maple. We rely on an improved generalized Budan--Fourier count applied to both the input polynomial and its reciprocal, together with Newton like approximation steps. The paper is illustrated with examples and pictures.", acknowledgement = ack-nhfb, } @InProceedings{Garoufalidis:2012:TQH, author = "Stavros Garoufalidis and Christoph Koutschan", title = "Twisting $q$-holonomic sequences by complex roots of unity", crossref = "vanderHoeven:2012:IPI", pages = "179--186", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442857", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "A sequence $ f_n (q) $ is $q$-holonomic if it satisfies a nontrivial linear recurrence with coefficients polynomials in $q$ and $ q^n $. 
Our main theorems state that $q$-holonomicity is preserved under twisting, i.e., replacing $q$ by $ \omega q $ where $ \omega $ is a complex root of unity, and under the substitution $ q > q^\alpha $ where $ \alpha $ is a rational number. Our proofs are constructive, work in the multivariate setting of \partial -finite sequences and are implemented in the Mathematica package {\tt HolonomicFunctions}. Our results are illustrated by twisting natural $q$-holonomic sequences which appear in quantum topology, namely the colored Jones polynomial of pretzel knots and twist knots. The recurrence of the twisted colored Jones polynomial can be used to compute the asymptotics of the Kashaev invariant of a knot at an arbitrary complex root of unity.", acknowledgement = ack-nhfb, } @InProceedings{Gleixner:2012:IAL, author = "Ambros M. Gleixner and Daniel E. Steffy and Kati Wolter", title = "Improving the accuracy of linear programming solvers with iterative refinement", crossref = "vanderHoeven:2012:IPI", pages = "187--194", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442858", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We describe an iterative refinement procedure for computing extended precision or exact solutions to linear programming problems (LPs). Arbitrarily precise solutions can be computed by solving a sequence of closely related LPs with limited precision arithmetic. The LPs solved share the same constraint matrix as the original problem instance and are transformed only by modification of the objective function, right-hand side, and variable bounds. Exact computation is used to compute and store the exact representation of the transformed problems, while numeric computation is used for solving LPs. At all steps of the algorithm the LP bases encountered in the transformed problems correspond directly to LP bases in the original problem description. 
We demonstrate that this algorithm is effective in practice for computing extended precision solutions and that this leads to direct improvement of the best known methods for solving LPs exactly over the rational numbers.", acknowledgement = ack-nhfb, } @InProceedings{Guo:2012:CIH, author = "Feng Guo and Erich L. Kaltofen and Lihong Zhi", title = "Certificates of impossibility of {Hilbert--Artin} representations of a given degree for definite polynomials and functions", crossref = "vanderHoeven:2012:IPI", pages = "195--202", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442859", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We deploy numerical semidefinite programming and conversion to exact rational inequalities to certify that for a positive semidefinite input polynomial or rational function, any representation as a fraction of sums-of-squares of polynomials with real coefficients must contain polynomials in the denominator of degree no less than a given input lower bound. By Artin's solution to Hilbert's 17th problems, such representations always exist for some denominator degree. Our certificates of infeasibility are based on the generalization of Farkas's Lemma to semidefinite programming. The literature has many famous examples of impossibility of SOS representability including Motzkin's, Robinson's, Choi's and Lam's polynomials, and Reznick's lower degree bounds on uniform denominators, e.g., powers of the sum-of-squares of each variable. Our work on exact certificates for positive semidefiniteness allows for non-uniform denominators, which can have lower degree and are often easier to convert to exact identities. Here we demonstrate our algorithm by computing certificates of impossibilities for an arbitrary sum-of-squares denominator of degree 2 and 4 for some symmetric sextics in 4 and 5 variables, respectively. 
We can also certify impossibility of base polynomials in the denominator of restricted term structure, for instance as in Landau's reduction by one less variable.", acknowledgement = ack-nhfb, } @InProceedings{Hubert:2012:RIS, author = "Evelyne Hubert and George Labahn", title = "Rational invariants of scalings from {Hermite} normal forms", crossref = "vanderHoeven:2012:IPI", pages = "219--226", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442862", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Scalings form a class of group actions that have both theoretical and practical importance. A scaling is accurately described by an integer matrix. Tools from linear algebra are exploited to compute a minimal generating set of rational invariants, trivial rewriting and rational sections for such a group action. The primary tools used are Hermite normal forms and their unimodular multipliers. With the same line of ideas, a complete solution to the scaling symmetry reduction of a polynomial system is also presented.", acknowledgement = ack-nhfb, } @InProceedings{Ishikawa:2012:ZHA, author = "Masao Ishikawa and Christoph Koutschan", title = "{Zeilberger}'s holonomic ansatz for {Pfaffians}", crossref = "vanderHoeven:2012:IPI", pages = "227--233", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442863", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "A variation of Zeilberger's holonomic ansatz for symbolic determinant evaluations is proposed which is tailored to deal with Pfaffians. The method is also applicable to determinants of skew-symmetric matrices, for which the original approach does not work. As Zeilberger's approach is based on the Laplace expansion (cofactor expansion) of the determinant, we derive our approach from the cofactor expansion of the Pfaffian. 
To demonstrate the power of our method, we prove, using computer algebra algorithms, some conjectures proposed in the paper ``Pfaffian decomposition and a Pfaffian analogue of q -Catalan Hankel determinants'' by Ishikawa, Tagawa, and Zeng. A minor summation formula related to partitions and Motzkin paths follows as a corollary.", acknowledgement = ack-nhfb, } @InProceedings{Koiran:2012:UBR, author = "Pascal Koiran", title = "Upper bounds on real roots and lower bounds for the permanent", crossref = "vanderHoeven:2012:IPI", pages = "8--8", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442833", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", acknowledgement = ack-nhfb, } @InProceedings{Lebreton:2012:AUD, author = "Romain Lebreton and {\'E}ric Schost", title = "Algorithms for the universal decomposition algebra", crossref = "vanderHoeven:2012:IPI", pages = "234--241", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442864", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Let $k$ be a field and let $ f \in k [T] $ be a polynomial of degree $n$. The universal decomposition algebra $A$ is the quotient of $ k[X_1, \ldots {}, X_n] $ by the ideal of symmetric relations (those polynomials that vanish on all permutations of the roots of $f$). We show how to obtain efficient algorithms to compute in $A$. We use a univariate representation of $A$, i.e. an isomorphism of the form $ A k [T] / Q (T) $, since in this representation, arithmetic operations in $A$ are known to be quasi-optimal. 
We give details for two related algorithms, to find the isomorphism above, and to compute the characteristic polynomial of any element of $A$.", acknowledgement = ack-nhfb, } @InProceedings{Lella:2012:EIA, author = "Paolo Lella", title = "An efficient implementation of the algorithm computing the {Borel}-fixed points of a {Hilbert} scheme", crossref = "vanderHoeven:2012:IPI", pages = "242--248", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442865", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Borel-fixed ideals play a key role in the study of Hilbert schemes. Indeed each component and each intersection of components of a Hilbert scheme contains at least one Borel-fixed point, i.e. a point corresponding to a subscheme defined by a Borel-fixed ideal. Moreover Borel-fixed ideals have good combinatorial properties, which make them very interesting in an algorithmic perspective. In this paper, we propose an implementation of the algorithm computing all the saturated Borel-fixed ideals with number of variables and Hilbert polynomial assigned, introduced from a theoretical point of view in the paper ``Segment ideals and Hilbert schemes of points'', Discrete Mathematics 311 (2011).", acknowledgement = ack-nhfb, } @InProceedings{Levandovskyy:2012:ECA, author = "Viktor Levandovskyy", title = "Elements of computer-algebraic analysis", crossref = "vanderHoeven:2012:IPI", pages = "9--10", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442834", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Algebraic Analysis has been coined as a term in the mid 50's by the Japanese group led by Mikio Sato. In recent years many constructions of Algebraic Analysis have been approached from a computer-algebraic point of view, with algorithms and their implementations. 
Extension of such an interaction from linear differential operators to linear difference, q -difference, q -differential and other linear operators we call Computer-Algebraic Analysis. The major object of study are systems of linear functional equations, their properties, solutions (including those in terms of generalized functions) and behaviour.", acknowledgement = ack-nhfb, } @InProceedings{Ma:2012:CRS, author = "Yue Ma and Lihong Zhi", title = "Computing real solutions of polynomial systems via low-rank moment matrix completion", crossref = "vanderHoeven:2012:IPI", pages = "249--256", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442866", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper, we propose a new algorithm for computing real roots of polynomial equations or a subset of real roots in a given semi-algebraic set described by additional polynomial inequalities. The algorithm is based on using modified fixed point continuation method for solving Lasserre's hierarchy of moment relaxations. We establish convergence properties for our algorithm. For a large-scale polynomial system with only few real solutions in a given area, we can extract them quickly. 
Moreover, for a polynomial system with an infinite number of real solutions, our algorithm can also be used to find some isolated real solutions or real solutions on the manifolds.", acknowledgement = ack-nhfb, } @InProceedings{McCarron:2012:SHQ, author = "James McCarron", title = "Small homogeneous quandles", crossref = "vanderHoeven:2012:IPI", pages = "257--264", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442867", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We derive an algorithm for computing all the homogeneous quandles of a given order n provided that a list of the transitive permutation groups of degree n are known. We discuss the implementation of the algorithm, and use it to enumerate the number of isomorphism classes of homogeneous quandles up to order 23 and compute representatives for each class. We also completely determine the homogeneous quandles of prime order. As a by-product, we are able to confirm an independent calculation of the connected quandles of order at most 30 by Vendramin and, based on this, to compute the number of isomorphism classes of simple quandles to the same order.", acknowledgement = ack-nhfb, } @InProceedings{Mourrain:2012:BBR, author = "Bernard Mourrain and Philippe Tr{\'e}buchet", title = "Border basis representation of a general quotient algebra", crossref = "vanderHoeven:2012:IPI", pages = "265--272", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442868", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper, we generalized the construction of border bases to non-zero dimensional ideals for normal forms compatible with the degree, tackling the remaining obstacle for a general application of border basis methods. First, we give conditions to have a border basis up to a given degree. 
Next, we describe a new stopping criterion to determine when the reduction with respect to the leading terms is a normal form.
Hence combined with the integration algorithm for $D$-modules, this enables us to compute a holonomic system for the integral of a function involving the logarithm of a polynomial with respect to some variables.", acknowledgement = ack-nhfb, } @InProceedings{Pauderis:2012:DUC, author = "Colton Pauderis and Arne Storjohann", title = "Deterministic unimodularity certification", crossref = "vanderHoeven:2012:IPI", pages = "281--288", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442870", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The asymptotically fastest algorithms for many linear algebra problems on integer matrices, including solving a system of linear equations and computing the determinant, use high-order lifting. Currently, high-order lifting requires the use of a randomized shifted number system to detect and avoid error-producing carries. By interleaving quadratic and linear lifting, we devise a new algorithm for high-order lifting that allows us to work in the usual symmetric range modulo p, thus avoiding randomization. As an application, we give a deterministic algorithm to assay if an n x n integer matrix A is unimodular. 
The cost of the algorithm is O ((\log n) n$^{ \omega }$ M(\log n + \log|| A ||)) bit operations, where || A || denotes the largest entry in absolute value, and M(t) is the cost of multiplying two integers bounded in bit length by t.", acknowledgement = ack-nhfb, } @InProceedings{Romero:2012:PBT, author = "Ana Romero and Francis Sergeraert", title = "Programming before theorizing, a case study", crossref = "vanderHoeven:2012:IPI", pages = "289--296", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442871", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This paper relates how a ``simple'' result in combinatorial homotopy eventually led to a totally new understanding of basic theorems in Algebraic Topology, namely the Eilenberg--Zilber theorem, the twisted Eilenberg--Zilber theorem, and finally the Eilenberg-MacLane correspondance between the Classifying Space and Bar constructions. In the last case, it was an amazing lucky consequence of computations based on conjectures not yet proved. The key new tool used in this context is Robin Forman's Discrete Vector Fields theory.", acknowledgement = ack-nhfb, } @InProceedings{Roune:2012:PGB, author = "Bjarke Hammersholt Roune and Michael Stillman", title = "Practical {Gr{\"o}bner} basis computation", crossref = "vanderHoeven:2012:IPI", pages = "203--210", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442860", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We report on our experiences exploring state of the art Gr{\"o}bner basis computation. We investigate signature based algorithms in detail. We also introduce new practical data structures and computational techniques for use in both signature based Gr{\"o}bner basis algorithms and more traditional variations of the classic Buchberger algorithm. 
Our conclusions are based on experiments using our new freely available open source standalone C++ library.", acknowledgement = ack-nhfb, } @InProceedings{Roy:2012:CDC, author = "Marie-Fran{\c{c}}oise Roy", title = "Complexity of deciding connectivity in real algebraic sets: recent results and future research directions", crossref = "vanderHoeven:2012:IPI", pages = "3--5", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442831", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The number of connected components of a real algebraic set defined in $R^k$ by equations of degree $d$ is $O(d)^k$ which is polynomial in the degree, and singly exponential in the number of variables. Moreover it is very easy to design algebraic sets defined by polynomials of degree $2 d$ in $k$ variables with $O(d)^k$ connected components.", acknowledgement = ack-nhfb, } @InProceedings{Sagraloff:2012:WNM, author = "Michael Sagraloff", title = "When {Newton} meets {Descartes}: a simple and fast algorithm to isolate the real roots of a polynomial", crossref = "vanderHoeven:2012:IPI", pages = "297--304", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442872", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We introduce a novel algorithm denoted NewDsc to isolate the real roots of a univariate square-free polynomial f with integer coefficients. The algorithm iteratively subdivides an initial interval which is known to contain all real roots of f and performs exact (rational) operations on the coefficients of f in each step. For the subdivision strategy, we combine Descartes' Rule of Signs and Newton iteration. 
More precisely, instead of using a fixed subdivision strategy such as bisection in each iteration, a Newton step based on the number of sign variations for an actual interval is considered, and, only if the Newton step fails, we fall back to bisection. Following this approach, quadratic convergence towards the real roots is achieved in most iterations. In terms of complexity, our method induces a recursion tree of almost optimal size $ O (n \cdot \log (n \tau)) $, where $n$ denotes the degree of the polynomial and \tau the bitsize of its coefficients. The latter bound constitutes an improvement by a factor of \tau upon all existing subdivision methods for the task of isolating the real roots. We further provide a detailed complexity analysis which shows that NewDsc needs only $ {\tilde O}(n^3 \tau) $ bit operations to isolate all real roots of f. In comparison to existing asymptotically fast numerical algorithms (e.g. the algorithms by V. Pan and A. Sch{\"o}nhage), NewDsc is much easier to access and, due to its similarities to the classical Descartes method, it seems to be well suited for an efficient implementation.", acknowledgement = ack-nhfb, } @InProceedings{Scheiblechner:2012:ERC, author = "Peter Scheiblechner", title = "Effective {de Rham} cohomology: the hypersurface case", crossref = "vanderHoeven:2012:IPI", pages = "305--310", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442873", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We prove an effective bound for the degrees of generators of the algebraic de Rham cohomology of smooth affine hypersurfaces. In particular, we show that the de Rham cohomology $ H^p_{dR}(X) $ of a smooth hypersurface $X$ of degree $d$ in $ C^n $ can be generated by differential forms of degree $ d^{O(pn)} $. 
This result is relevant for the algorithmic computation of the cohomology, but is also motivated by questions in the theory of ordinary differential equations related to the infinitesimal Hilbert 16th problem.", acknowledgement = ack-nhfb, } @InProceedings{Seress:2012:CCR, author = "{\'A}kos Seress", title = "Construction of $2$-closed {$M$}-representations", crossref = "vanderHoeven:2012:IPI", pages = "311--318", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442874", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The sporadic simple group Monster, denoted by M, acts on the Griess algebra, which is a real vector space of dimension 196,884, equipped with a positive definite scalar product and a bilinear, commutative, and non-associative algebra product. Certain properties of this linear representation of M, together with properties (discovered by Conway and Miyamoto) of idempotents in the Griess algebra that correspond to 2A involutions in M, have been defined by Ivanov as the M-representation of the Monster. This definition enables us to talk about M-representations of arbitrary groups G that are generated by involutions. In general, an M-representation may or may not exist, but if G is isomorphic to a subgroup of the Monster and a representation is isomorphic to the corresponding subalgebra of the Griess algebra then we say that the M-representation is based on an embedding of G in the Monster. In this paper, we describe a generic theoretical procedure to construct M-representations, and a GAP computer program that implements the procedure. 
It turns out that in many cases the representations are based on embeddings in the Monster, thereby providing a valuable tool of studying subalgebras of the Griess algebra that were unaccessible in the 196,884-dimensional setting.", acknowledgement = ack-nhfb, } @InProceedings{Sharma:2012:NOT, author = "Vikram Sharma and Chee K. Yap", title = "Near optimal tree size bounds on a simple real root isolation algorithm", crossref = "vanderHoeven:2012:IPI", pages = "319--326", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442875", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The problem of isolating all real roots of a square-free integer polynomial $ f(X) $ inside any given interval $ I_0 $ is a fundamental problem. EVAL is a simple and practical exact numerical algorithm for this problem: it recursively bisects $ I_0 $, and any sub-interval $ I \subseteq I_0 $, until a certain numerical predicate $ C_0 (I) V C_1 (I) $ holds on each $I$. We prove that the size of the recursion tree is $ O(d (L + r + \log d)) $ where $f$ has degree $d$, its coefficients have absolute values $ < 2^L $, and $ I_0 $ contains $r$ roots of $f$. In the range $ L \geq d $, our bound is the sharpest known, and provably optimal. Our results are closely paralleled by recent bounds on EVAL by Sagraloff--Yap (ISSAC 2011) and Burr--Krahmer (2012). In the range $ L \leq d $, our bound is incomparable with those of Sagraloff--Yap or Burr--Krahmer. Similar to the Burr--Krahmer proof, we exploit the technique of ``continuous amortization'' from Burr--Krahmer--Yap (2009), namely to bound the tree size by an integral $ \int_I O G(x) \, d x $ over a suitable ``charging function'' $ G(x) $. 
We give an application of this feature to the problem of ray-shooting (i.e., finding the smallest root in a given interval).
Our method shows how legacy algorithms employing recursion and non-streaming memory access can be more easily translated into efficient parallel disk-based algorithms. We demonstrate the ideas on a largest computation of its kind: the determinization via subset construction and minimization of very large nondeterministic finite set automata (NFA). To our knowledge, this is the largest subset construction reported in the literature. Determinization for large NFA has long been a large computational hurdle in the study of permutation classes defined by token passing networks. The programming model was used to design and implement an efficient NFA determinization algorithm that solves the next stage in analyzing token passing networks representing two stacks in series.", acknowledgement = ack-nhfb, } @InProceedings{Strassen:2012:ASM, author = "Volker Strassen", title = "Asymptotic spectrum and matrix multiplication", crossref = "vanderHoeven:2012:IPI", pages = "6--7", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442832", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The minimal number of arithmetic operations sufficient to multiply matrices of order m by an algebraic circuit has the form m$^{\omega + o(1)}$, where o(1) goes to zero when m tends to infinity. \omega is called the exponent of matrix multiplication. Asymptotically, it controls the complexity of almost all significant computational tasks of linear algebra. 
The desire to determine \omega has been the main motivation for investigating the complexity of bilinear maps in general.", acknowledgement = ack-nhfb, } @InProceedings{Strzebonski:2012:SPS, author = "Adam Strzebo{\'n}ski", title = "Solving polynomial systems over semialgebraic sets represented by cylindrical algebraic formulas", crossref = "vanderHoeven:2012:IPI", pages = "335--342", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442877", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Cylindrical algebraic formulas are an explicit representation of semialgebraic sets as finite unions of cylindrically arranged disjoint cells bounded by graphs of algebraic functions. We present a version of the Cylindrical Algebraic Decomposition (CAD) algorithm customized for solving systems of polynomial equations and inequalities over semialgebraic sets given in this representation. The algorithm can also be used to solve conjunctions of polynomial conditions in an incremental manner. We show application examples and give an empirical comparison of incremental and direct CAD computation.", acknowledgement = ack-nhfb, } @InProceedings{Strzebonski:2012:URR, author = "Adam Strzebo{\'n}ski and Elias P. Tsigaridas", title = "Univariate real root isolation in multiple extension fields", crossref = "vanderHoeven:2012:IPI", pages = "343--350", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442878", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/mathematica.bib", abstract = "We present algorithmic, complexity and implementation results for the problem of isolating the real roots of a univariate polynomial in $ B_\alpha \in L [y] $, where $ L = Q(\alpha_1, \ldots {}, \alpha_l) $ is an algebraic extension of the rational numbers. 
Our bounds are single exponential in $l$ and match the ones presented in [34] for the case $ l = 1 $. We consider two approaches. The first, indirect approach, using multivariate resultants, computes a univariate polynomial with integer coefficients, among the real roots of which are the real roots of $ B_\alpha $. The Boolean complexity of this approach is $ O_B(N^{4 l + 4}) $, where $N$ is the maximum of the degrees and the coefficient bitsize of the involved polynomials. The second, direct approach, tries to solve the polynomial directly, without reducing the problem to a univariate one. We present an algorithm that generalizes Sturm algorithm from the univariate case, and modified versions of well known solvers that are either numerical or based on Descartes' rule of sign. We achieve a Boolean complexity of $ O_B $ [equation], respectively. We implemented the algorithms in C as part of the core library of Mathematica and we illustrate their efficiency over various data sets.", acknowledgement = ack-nhfb, } @InProceedings{Sullivant:2012:AS, author = "Seth Sullivant", title = "Algebraic statistics", crossref = "vanderHoeven:2012:IPI", pages = "11--11", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442835", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Algebraic statistics advocates polynomial algebra as a tool for addressing problems in statistics and its applications. This connection is based on the fact that most statistical models are defined either parametrically or implicitly via polynomial equations. The idea is summarized by the phrase ``Statistical models are semialgebraic sets''. 
My tutorial will consist of a detailed study of two examples where the algebra/statistics connection has proven especially useful: in the study of phylogenetic models and in the analysis of contingency tables.", acknowledgement = ack-nhfb, } @InProceedings{Sun:2012:SBA, author = "Yao Sun and Dingkang Wang and Xiaodong Ma and Yang Zhang", title = "A signature-based algorithm for computing {Gr{\"o}bner} bases in solvable polynomial algebras", crossref = "vanderHoeven:2012:IPI", pages = "351--358", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442879", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Signature-based algorithms, including F5, F5C, G2V and GVW, are efficient algorithms for computing Gr{\"o}bner bases in commutative polynomial rings. In this paper, we present a signature-based algorithm to compute Gr{\"o}bner bases in solvable polynomial algebras which include usual commutative polynomial rings and some non-commutative polynomial rings like Weyl algebra. The generalized Rewritten Criterion (discussed in Sun and Wang, ISSAC 2011) is used to reject redundant computations. When this new algorithm uses the partial order implied by GVW, its termination is proved without special assumptions on computing orders of critical pairs. Data structures similar to F5 can be used to speed up this new algorithm, and Gr{\"o}bner bases of syzygy modules of input polynomials can be obtained from the outputs easily. 
Experimental data show that most redundant computations can be avoided in this new algorithm.", acknowledgement = ack-nhfb, } @InProceedings{vanderHoeven:2012:CMB, author = "Joris van der Hoeven and Gr{\'e}goire Lecerf", title = "On the complexity of multivariate blockwise polynomial multiplication", crossref = "vanderHoeven:2012:IPI", pages = "211--218", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442861", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this article, we study the problem of multiplying two multivariate polynomials which are somewhat but not too sparse, typically like polynomials with convex supports. We design and analyze an algorithm which is based on blockwise decomposition of the input polynomials, and which performs the actual multiplication in an FFT model or some other more general so called ``evaluated model''. If the input polynomials have total degrees at most d, then, under mild assumptions on the coefficient ring, we show that their product can be computed with $O(s^{1.5337})$ ring operations, where $s$ denotes the number of all the monomials of total degree at most $2 d$.", acknowledgement = ack-nhfb, } @InProceedings{Zhang:2012:FDO, author = "Mingbo Zhang and Yong Luo", title = "Factorization of differential operators with ordinary differential polynomial coefficients", crossref = "vanderHoeven:2012:IPI", pages = "359--365", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442880", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper, we present an algorithm to factor a differential operator $ L = \sigma^n + c_{n - 1} \sigma^{n - 1} + \cdot \cdot \cdot + c_1 \sigma + c_0 $ with coefficients $ c_i $ in $ C \{ y \} $, where $C$ is a constant field and $ C \{ y \} $ is the ordinary differential polynomial ring over $C$. 
Also, we discuss the applications of the algorithm in decomposing nonlinear differential polynomials and factoring differential operators with coefficients in the extension field of $C$.", acknowledgement = ack-nhfb, } @InProceedings{Zhou:2012:CMN, author = "Wei Zhou and George Labahn and Arne Storjohann", title = "Computing minimal nullspace bases", crossref = "vanderHoeven:2012:IPI", pages = "366--373", year = "2012", DOI = "https://doi.org/10.1145/2442829.2442881", bibdate = "Fri Mar 14 13:49:05 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper we present a deterministic algorithm for the computation of a minimal nullspace basis of an $ m \times n $ input matrix of univariate polynomials over a field $K$ with $ m \leq n $. This algorithm computes a minimal nullspace basis of a degree $d$ input matrix with a cost of $ O \tilde (n^\omega \lceil m d / n \rceil) $ field operations in $K$. Here the soft-$O$ notation is Big-$O$ with $ \log $ factors removed while $ \omega $ is the exponent of matrix multiplication. The same algorithm also works in the more general situation on computing a shifted minimal nullspace basis, with a given degree shift [equation] whose entries bound the corresponding column degrees of the input matrix. 
In this case if $ \rho $ is the sum of the $m$ largest entries of $s$, then a $s$-minimal right nullspace basis can be computed with a cost of $ O \tilde (n^\omega \rho / m) $ field operations.", acknowledgement = ack-nhfb, } @InProceedings{Arnold:2013:NTF, author = "Andrew Arnold", title = "A new truncated {Fourier Transform} algorithm", crossref = "Monagan:2013:IPI", pages = "15--22", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465957", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Truncated Fourier Transforms (TFTs), first introduced by van der Hoeven, refer to a family of algorithms that attempt to smooth ``jumps'' in complexity exhibited by FFT algorithms. We present an in-place TFT whose time complexity, measured in terms of ring operations, is asymptotically equivalent to existing not-in-place TFT methods. We also describe a transformation that maps between two families of TFT algorithms that use different sets of evaluation points.", acknowledgement = ack-nhfb, } @InProceedings{Bach:2013:ACS, author = "Eric Bach and Jonathan P. Sorenson", title = "Approximately counting semismooth integers", crossref = "Monagan:2013:IPI", pages = "23--30", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465933", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "An integer $n$ is $ (y, z) $-semismooth if $ n = p m $ where $m$ is an integer with all prime divisors $ \geq y $ and $p$ is $1$ or a prime $ \geq z $. Large quantities of semismooth integers are utilized in modern integer factoring algorithms, such as the number field sieve, that incorporate the so-called large prime variant. 
Thus, it is useful for factoring practitioners to be able to estimate the value of $ \Psi (x, y, z) $, the number of $ (y, z) $-semismooth integers up to $x$, so that they can better set algorithm parameters and minimize running times, which could be weeks or months on a cluster supercomputer. In this paper, we explore several algorithms to approximate $ \Psi (x, y, z) $ using a generalization of Buchstab's identity with numeric integration.", acknowledgement = ack-nhfb, } @InProceedings{Basson:2013:EEL, author = "Romain Basson and Reynald Lercier and Christophe Ritzenthaler and Jeroen Sijsling", title = "An explicit expression of the {L{\"u}roth} invariant", crossref = "Monagan:2013:IPI", pages = "31--36", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465507", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this short note, we give an algorithm that returns an explicit expression of the L{\"u}roth invariant in terms of the Dixmier-Ohno invariants of plane quartic curves. We also obtain an explicit factorized expression on the locus of Ciani quartics in terms of the coefficients. After this calculation, we extend our methods to answer two open theoretical questions concerning the sub-locus of singular L{\"u}roth quartics.", acknowledgement = ack-nhfb, } @InProceedings{Berthe:2013:MGP, author = "Val{\'e}rie Berth{\'e} and Jean Creusefond and Lo{\"\i}ck Lhote and Brigitte Vall{\'e}e", title = "Multiple {GCDs}. 
{Probabilistic} analysis of the plain algorithm", crossref = "Monagan:2013:IPI", pages = "37--44", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465512", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This paper provides a probabilistic analysis of an algorithm which computes the gcd of $l$ inputs (with $ l \geq 2 $), with a succession of $ l - 1 $ phases, each of them being the Euclid algorithm on two entries. This algorithm is both basic and natural, and two kinds of inputs are studied: polynomials over the finite field F$_q$ and integers. The analysis exhibits the precise probabilistic behaviour of the main parameters, namely the number of iterations in each phase and the evolution of the length of the current gcd along the execution. We first provide an average-case analysis. Then we make it even more precise by a distributional analysis. Our results rigorously exhibit two phenomena: (i) there is a strong difference between the first phase, where most of the computations are done and the remaining phases; (ii) there is a strong similarity between the polynomial and integer cases, as can be expected.", acknowledgement = ack-nhfb, } @InProceedings{Bessonov:2013:ICP, author = "Mariya Bessonov and Alexey Ovchinnikov and Maxwell Shapiro", title = "Integrability conditions for parameterized linear difference equations", crossref = "Monagan:2013:IPI", pages = "45--52", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465942", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We study integrability conditions for systems of parameterized linear difference equations and related properties of linear differential algebraic groups. 
We show that isomonodromicity of such a system is equivalent to isomonodromicity with respect to each parameter separately under a linearly differentially closed assumption on the field of differential parameters. Due to our result, it is no longer necessary to solve non-linear differential equations to verify isomonodromicity, which will improve efficiency of computation with these systems. Moreover, it is not possible to further strengthen this result by removing the requirement on the parameters, as we show by giving a counterexample. We also discuss the relation between isomonodromicity and the properties of the associated parameterized difference Galois group.", acknowledgement = ack-nhfb, } @InProceedings{Betten:2013:RCC, author = "Anton Betten", title = "Rainbow cliques and the classification of small {BLT-sets}", crossref = "Monagan:2013:IPI", pages = "53--60", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465508", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In Finite Geometry, a class of objects known as BLT-sets play an important role. They are points on the Q (4, q) quadric satisfying a condition on triples. This paper is a contribution to the difficult problem of classifying these sets up to isomorphism, i.e., up to the action of the automorphism group of the quadric. We reduce the classification problem of these sets to the problem of classifying rainbow cliques in graphs. This allows us to classify BLT-sets for all orders q in the range 31 to 67.", acknowledgement = ack-nhfb, } @InProceedings{Bi:2013:SLR, author = "Jingguo Bi and Qi Cheng and J. 
Maurice Rojas", title = "Sub-linear root detection, and new hardness results, for sparse polynomials over finite fields", crossref = "Monagan:2013:IPI", pages = "61--68", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465514", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present a deterministic $ 2^{O(t)} q^{(t - 2) / (t - 1) + o (1)} $ algorithm to decide whether a univariate polynomial $f$, with exactly $t$ monomial terms and degree $ < q $, has a root in $ F_q $. Our method is the first with complexity sub-linear in $q$ when $t$ is fixed. We also prove a structural property for the nonzero roots in $ F_q $ of any $t$-nomial: the nonzero roots always admit a partition into no more than $ 2 \sqrt {t - 1} (q - 1)^{(t - 2) / (t - 1)} $ cosets of two subgroups $ S_1 \subseteq S_2 $ of $ F*_q $. This can be thought of as a finite field analogue of Descartes' Rule. A corollary of our results is the first deterministic sub-linear algorithm for detecting common degree one factors of $k$-tuples of $t$-nomials in $ F_q[x] $ when $k$ and $t$ are fixed. When $t$ is not fixed we show that, for $p$ prime, detecting roots in $ F_p $ for $f$ is NP-hard with respect to BPP-reductions. 
Finally, we prove that if the complexity of root detection is sub-linear (in a refined sense), relative to the straight-line program encoding, then $ {\rm NEXP} \subseteq P / {\em poly} $.", acknowledgement = ack-nhfb, } @InProceedings{Boady:2013:TRS, author = "Mark Boady and Pavel Grinfeld and Jeremy Johnson", title = "A term rewriting system for the calculus of moving surfaces", crossref = "Monagan:2013:IPI", pages = "69--76", year = "2013", DOI = "https://doi.org/10.1145/2465506.2466576", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The calculus of moving surfaces (CMS) is an analytic framework that extends the tensor calculus to deforming manifolds. We have applied the CMS to a number of boundary variation problems using a Term Rewrite System (TRS). The TRS is used to convert the initial CMS expression into a form that can be evaluated. The CMS produces expressions that are true for all coordinate spaces. This makes it very powerful but applications remain limited by a rapid growth in the size of expressions. We have extended results on existing problems to orders that had been previously intractable. In this paper, we describe our TRS and our method for evaluating CMS expressions on a specific coordinate system. 
Our work has already provided new insight into problems of current interest to researchers in the CMS.", acknowledgement = ack-nhfb, } @InProceedings{Bostan:2013:CET, author = "Alin Bostan and Fr{\'e}d{\'e}ric Chyzak and {\'E}lie de Panafieu", title = "Complexity estimates for two uncoupling algorithms", crossref = "Monagan:2013:IPI", pages = "85--92", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465941", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Uncoupling algorithms transform a linear differential system of first order into one or several scalar differential equations. We examine two approaches to uncoupling: the cyclic-vector method (CVM) and the Danilevski-Barkatou-Z{\"u}rcher algorithm (DBZ). We give tight size bounds on the scalar equations produced by CVM, and design a fast variant of CVM whose complexity is quasi-optimal with respect to the output size. We exhibit a strong structural link between CVM and DBZ enabling to show that, in the generic case, DBZ has polynomial complexity and that it produces a single equation, strongly related to the output of CVM. We prove that algorithm CVM is faster than DBZ by almost two orders of magnitude, and provide experimental results that validate the theoretical complexity analyses.", acknowledgement = ack-nhfb, } @InProceedings{Bostan:2013:CTR, author = "Alin Bostan and Pierre Lairez and Bruno Salvy", title = "Creative telescoping for rational functions using the {Griffiths--Dwork} method", crossref = "Monagan:2013:IPI", pages = "93--100", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465935", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Creative telescoping algorithms compute linear differential equations satisfied by multiple integrals with parameters. 
We describe a precise and elementary algorithmic version of the Griffiths--Dwork method for the creative telescoping of rational functions. This leads to bounds on the order and degree of the coefficients of the differential equation, and to the first complexity result which is single exponential in the number of variables. One of the important features of the algorithm is that it does not need to compute certificates. The approach is vindicated by a prototype implementation.", acknowledgement = ack-nhfb, } @InProceedings{Bostan:2013:HRC, author = "Alin Bostan and Shaoshi Chen and Fr{\'e}d{\'e}ric Chyzak and Ziming Li and Guoce Xin", title = "{Hermite} reduction and creative telescoping for hyperexponential functions", crossref = "Monagan:2013:IPI", pages = "77--84", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465946", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present a new reduction algorithm that simultaneously extends Hermite's reduction for rational functions and the Hermite-like reduction for hyperexponential functions. It yields a unique additive decomposition that allows to decide hyperexponential integrability. Based on this reduction algorithm, we design a new algorithm to compute minimal telescopers for bivariate hyperexponential functions. One of its main features is that it can avoid the costly computation of certificates. Its implementation outperforms Maple's function DEtools[Zeilberger]. 
We also derive an order bound on minimal telescopers that is tighter than the known ones.", acknowledgement = ack-nhfb, } @InProceedings{Boulier:2013:IDF, author = "Fran{\c{c}}ois Boulier and Fran{\c{c}}ois Lemaire and Georg Regensburger and Markus Rosenkranz", title = "On the integration of differential fractions", crossref = "Monagan:2013:IPI", pages = "101--108", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465934", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper, we provide a differential algebra algorithm for integrating fractions of differential polynomials. It is not restricted to differential fractions that are the derivatives of other differential fractions. The algorithm leads to new techniques for representing differential fractions, which may help converting differential equations to integral equations (as for example used in parameter estimation).", acknowledgement = ack-nhfb, } @InProceedings{Bouzidi:2013:RUR, author = "Yacine Bouzidi and Sylvain Lazard and Marc Pouget and Fabrice Rouillier", title = "Rational univariate representations of bivariate systems and applications", crossref = "Monagan:2013:IPI", pages = "109--116", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465519", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We address the problem of solving systems of two bivariate polynomials of total degree at most $d$ with integer coefficients of maximum bitsize $ \tau $. We suppose known a linear separating form (that is a linear combination of the variables that takes different values at distinct solutions of the system) and focus on the computation of a Rational Univariate Representation (RUR). 
We present an algorithm for computing a RUR with worst-case bit complexity in $ {\tilde O}_B (d^7 + d^6 \tau) $ and bound the bitsize of its coefficients by $ {\tilde O}(d^2 + d \tau) $ (where $ {\tilde O}_B $ refers to bit complexities and $ {\tilde O} $ to complexities where polylogarithmic factors are omitted). We show in addition that isolating boxes of the solutions of the system can be computed from the RUR with $ {\tilde O}_B (d^8 + d^7 \tau) $ bit operations. Finally, we show how a RUR can be used to evaluate the sign of a bivariate polynomial (of degree at most $d$ and bitsize at most $ \tau $) at one real solution of the system in $ {\tilde O}_B (d^8 + d^7 \tau) $ bit operations and at all the $ \Theta (d^2) $ solutions in only $ O(d) $ times that for one solution.", acknowledgement = ack-nhfb, } @InProceedings{Bouzidi:2013:SLF, author = "Yacine Bouzidi and Sylvain Lazard and Marc Pouget and Fabrice Rouillier", title = "Separating linear forms for bivariate systems", crossref = "Monagan:2013:IPI", pages = "117--124", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465518", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present an algorithm for computing a separating linear form of a system of bivariate polynomials with integer coefficients, that is a linear combination of the variables that takes different values when evaluated at distinct (complex) solutions of the system. In other words, a separating linear form defines a shear of the coordinate system that sends the algebraic system in generic position, in the sense that no two distinct solutions are vertically aligned. 
The computation of such linear forms is at the core of most algorithms that solve algebraic systems by computing rational parameterizations of the solutions and, moreover, the computation of a separating linear form is the bottleneck of these algorithms, in terms of worst-case bit complexity. Given two bivariate polynomials of total degree at most $d$ with integer coefficients of bitsize at most $ \tau $, our algorithm computes a separating linear form in $ {\tilde O}_B (d^8 + d^7 \tau + d^5 \tau^2) $ bit operations in the worst case, where the previously known best bit complexity for this problem was $ {\tilde O}_B (d^{10} + d^9 \tau) $ (where $ {\tilde O} $ refers to the complexity where polylogarithmic factors are omitted and $ {\tilde O}_B $ refers to the bit complexity)", acknowledgement = ack-nhfb, } @InProceedings{Bradford:2013:CAD, author = "Russell Bradford and James H. Davenport and Matthew England and Scott McCallum and David Wilson", title = "Cylindrical algebraic decompositions for boolean combinations", crossref = "Monagan:2013:IPI", pages = "125--132", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465516", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/maple-extract.bib", abstract = "This article makes the key observation that when using cylindrical algebraic decomposition (CAD) to solve a problem with respect to a set of polynomials, it is not always the signs of those polynomials that are of paramount importance but rather the truth values of certain quantifier free formulae involving them. This motivates our definition of a Truth Table Invariant CAD (TTICAD). We generalise the theory of equational constraints to design an algorithm which will efficiently construct a TTICAD for a wide class of problems, producing stronger results than when using equational constraints alone. 
The algorithm is implemented fully in Maple and we present promising results from experimentation.", acknowledgement = ack-nhfb, } @InProceedings{Brown:2013:CSO, author = "Christopher W. Brown", title = "Constructing a single open cell in a cylindrical algebraic decomposition", crossref = "Monagan:2013:IPI", pages = "133--140", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465952", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This paper presents an algorithm that, roughly speaking, constructs a single open cell from a cylindrical algebraic decomposition (CAD). The algorithm takes as input a point and a set of polynomials, and computes a description of an open cylindrical cell containing the point in which the input polynomials have constant non-zero sign, provided the point is sufficiently generic. The paper reports on a few example computations carried out by a test implementation of the algorithm, which demonstrate the functioning of the algorithm and illustrate the sense in which it is more efficient than following the usual ``open CAD'' approach. Interest in the problem of computing a single cell from a CAD is motivated by a 2012 paper of Jovanovic and de Moura that requires solving this problem repeatedly as a key step in the NLSAT system. 
However, the example computations raise the possibility that repeated application of the new method may in fact be more efficient than the usual open CAD approach, both in time and space, for a broad range of problems.", acknowledgement = ack-nhfb, } @InProceedings{Chattopadhyay:2013:FBL, author = "Arkadev Chattopadhyay and Bruno Grenet and Pascal Koiran and Natacha Portier and Yann Strozecki", title = "Factoring bivariate lacunary polynomials without heights", crossref = "Monagan:2013:IPI", pages = "141--148", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465932", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present an algorithm which computes the multilinear factors of bivariate lacunary polynomials. It is based on a new Gap theorem which allows to test whether $ P(X) = \Sigma^k_{j = 1} \alpha_j X^{\alpha_j}(1 + X)^{\beta_j} $ is identically zero in polynomial time. The algorithm we obtain is more elementary than the one by Kaltofen and Koiran (ISSAC'05) since it relies on the valuation of polynomials of the previous form instead of the height of the coefficients. As a result, it can be used to find some linear factors of bivariate lacunary polynomials over a field of large finite characteristic in probabilistic polynomial time.", acknowledgement = ack-nhfb, } @InProceedings{Chen:2013:DEO, author = "Shaoshi Chen and Maximilian Jaroschek and Manuel Kauers and Michael F. 
Singer", title = "Desingularization explains order-degree curves for {Ore} operators", crossref = "Monagan:2013:IPI", pages = "157--164", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465510", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Desingularization is the problem of finding a left multiple of a given Ore operator in which some factor of the leading coefficient of the original operator is removed. An order-degree curve for a given Ore operator is a curve in the $ (r, d) $-plane such that for all points $ (r, d) $ above this curve, there exists a left multiple of order $r$ and degree $d$ of the given operator. We give a new proof of a desingularization result by Abramov and van Hoeij for the shift case, and show how desingularization implies order-degree curves which are extremely accurate in examples.", acknowledgement = ack-nhfb, } @InProceedings{Chen:2013:NVH, author = "Jingwei Chen and Damien Stehl{\'e} and Gilles Villard", title = "A new view on {HJLS} and {PSLQ}: sums and projections of lattices", crossref = "Monagan:2013:IPI", pages = "149--156", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465936", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The HJLS and PSLQ algorithms are the de facto standards for discovering non-trivial integer relations between a given tuple of real numbers. In this work, we provide a new interpretation of these algorithms, in a more general and powerful algebraic setup: we view them as special cases of algorithms that compute the intersection between a lattice and a vector subspace. Further, we extract from them the first algorithm for manipulating finitely generated additive subgroups of a Euclidean space, including projections of lattices and finite sums of lattices. 
We adapt the analyses of HJLS and PSLQ to derive correctness and convergence guarantees.", acknowledgement = ack-nhfb, } @InProceedings{Cohn:2013:SES, author = "Henry Cohn", title = "Solving equations with size constraints for the solutions", crossref = "Monagan:2013:IPI", pages = "1--2", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465927", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", acknowledgement = ack-nhfb, } @InProceedings{DeFeo:2013:FAA, author = "Luca {De Feo} and Javad Doliskani and Eric Schost", title = "Fast algorithms for $l$-adic towers over finite fields", crossref = "Monagan:2013:IPI", pages = "165--172", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465956", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Inspired by previous work of Shoup, Lenstra-De Smit and Couveignes-Lercier, we give fast algorithms to compute in the first levels of the $l$-adic closure of a finite field. In many cases, our algorithms have quasi-linear complexity.", acknowledgement = ack-nhfb, } @InProceedings{Dickenstein:2013:CDR, author = "Alicia Dickenstein and Ioannis Z. Emiris and Vissarion Fisikopoulos", title = "Combinatorics of $4$-dimensional resultant polytopes", crossref = "Monagan:2013:IPI", pages = "173--180", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465937", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The Newton polytope of the resultant, or resultant polytope, characterizes the resultant polynomial more precisely than total degree. The combinatorics of resultant polytopes are known in the Sylvester case [Gelfand et al.90] and up to dimension 3 [Sturmfels 94]. 
We extend this work by studying the combinatorial characterization of 4-dimensional resultant polytopes, which show a greater diversity and involve computational and combinatorial challenges. In particular, our experiments, based on software respol for computing resultant polytopes, establish lower bounds on the maximal number of faces. By studying mixed subdivisions, we obtain tight upper bounds on the maximal number of facets and ridges, thus arriving at the following maximal f-vector: (22,66,66,22), i.e. vector of face cardinalities. Certain general features emerge, such as the symmetry of the maximal f-vector, which are intriguing but still under investigation. We establish a result of independent interest, namely that the f-vector is maximized when the input supports are sufficiently generic, namely full dimensional and without parallel edges. Lastly, we offer a classification result of all possible 4-dimensional resultant polytopes.", acknowledgement = ack-nhfb, } @InProceedings{Dumas:2013:SCR, author = "Jean-Guillaume Dumas and Cl{\'e}ment Pernet and Ziad Sultan", title = "Simultaneous computation of the row and column rank profiles", crossref = "Monagan:2013:IPI", pages = "181--188", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465517", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Gaussian elimination with full pivoting generates a PLUQ matrix decomposition. Depending on the strategy used in the search for pivots, the permutation matrices can reveal some information about the row or the column rank profiles of the matrix. We propose a new pivoting strategy that makes it possible to recover at the same time both row and column rank profiles of the input matrix and of any of its leading sub-matrices. 
We propose a rank-sensitive and quad-recursive algorithm that computes the latter PLUQ triangular decomposition of an $m \times n$ matrix of rank $r$ in $O(m n r^{\omega - 2})$ field operations, with $\omega$ the exponent of matrix multiplication. Compared to the LEU decomposition by Malashonock, sharing a similar recursive structure, its time complexity is rank sensitive and has a lower leading constant. Over a word size finite field, this algorithm also improves the practical efficiency of previously known implementations.", acknowledgement = ack-nhfb, } @InProceedings{Eder:2013:SRG, author = "Christian Eder and Bjarke Hammersholt Roune", title = "Signature rewriting in {Gr{\"o}bner} basis computation", crossref = "Monagan:2013:IPI", pages = "331--338", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465522", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We introduce the RB algorithm for Gr{\"o}bner basis computation, a simpler yet equivalent algorithm to F5GEN. RB contains the original unmodified F5 algorithm as a special case, so it is possible to study and understand F5 by considering the simpler RB. We present simple yet complete proofs of this fact and of F5's termination and correctness. RB is parametrized by a rewrite order and it contains many published algorithms as special cases, including SB. We prove that SB is the best possible instantiation of RB in the following sense. Let X be any instantiation of RB (such as F5). 
Then the S-pairs reduced by SB are always a subset of the S-pairs reduced by X and the basis computed by SB is always a subset of the basis computed by X.", acknowledgement = ack-nhfb, } @InProceedings{ElDin:2013:CPM, author = "Mohab Safey {El Din}", title = "Critical point methods and effective real algebraic geometry: new results and trends", crossref = "Monagan:2013:IPI", pages = "5--6", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465928", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", acknowledgement = ack-nhfb, } @InProceedings{Faugere:2013:CCG, author = "Jean-Charles Faug{\`e}re and Mohab Safey {El Din} and Thibaut Verron", title = "On the complexity of computing {Gr{\"o}bner} bases for quasi-homogeneous systems", crossref = "Monagan:2013:IPI", pages = "189--196", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465943", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Let $K$ be a field and $(f_1, \ldots{}, f_n) \subset K[X_1, \ldots{}, X_n]$ be a sequence of quasi-homogeneous polynomials of respective weighted degrees $(d_1, \ldots{}, d_n)$ w.r.t a system of weights ($w_1$, \ldots{}, $w_n$). Such systems are likely to arise from a lot of applications, including physics or cryptography. We design strategies for computing Gr{\"o}bner bases for quasi-homogeneous systems by adapting existing algorithms for homogeneous systems to the quasi-homogeneous case. Overall, under genericity assumptions, we show that for a generic zero-dimensional quasi homogeneous system, the complexity of the full strategy is polynomial in the weighted B{\'e}zout bound $\Pi_{i =1}^n d^i / \Pi _{i =1}^n w^i$. We provide some experimental results based on generic systems as well as systems arising from a cryptography problem. 
They show that taking advantage of the quasi-homogeneous structure of the systems allow us to solve systems that were out of reach otherwise.", acknowledgement = ack-nhfb, } @InProceedings{Faugere:2013:GBI, author = "Jean-Charles Faug{\`e}re and Jules Svartz", title = "{Gr{\"o}bner} bases of ideals invariant under a commutative group: the non-modular case", crossref = "Monagan:2013:IPI", pages = "347--354", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465944", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We propose efficient algorithms to compute the Gr{\"o}bner basis of an ideal I subset k [ x$_1$,\ldots{}, x$_n$ ] globally invariant under the action of a commutative matrix group G, in the non-modular case (where char (k) doesn't divide | G |). The idea is to simultaneously diagonalize the matrices in G, and apply a linear change of variables on I corresponding to the base-change matrix of this diagonalization. We can now suppose that the matrices acting on I are diagonal. This action induces a grading on the ring R=k [ x$_1$,\ldots{}, x$_n$ ], compatible with the degree, indexed by a group related to G, that we call G -degree. The next step is the observation that this grading is maintained during a Gr{\"o}bner basis computation or even a change of ordering, which allows us to split the Macaulay matrices into | G | submatrices of roughly the same size. In the same way, we are able to split the canonical basis of R/I (the staircase) if I is a zero-dimensional ideal. Therefore, we derive abelian versions of the classical algorithms F$_4$, F$_5$ or FGLM. Moreover, this new variant of F$_4$ / F$_5$ allows complete parallelization of the linear algebra steps, which has been successfully implemented. On instances coming from applications (NTRU crypto-system or the Cyclic-n problem), a speed-up of more than 400 can be obtained. 
For example, a Gr{\"o}bner basis of the Cyclic-11 problem can be solved in less than 8 hours with this variant of F$_4$. Moreover, using this method, we can identify new classes of polynomial systems that can be solved in polynomial time.", acknowledgement = ack-nhfb, } @InProceedings{Guo:2013:CRS, author = "Qingdong Guo and Mohab Safey {El Din} and Lihong Zhi", title = "Computing rational solutions of linear matrix inequalities", crossref = "Monagan:2013:IPI", pages = "197--204", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465949", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Consider a $ (D \times D) $ symmetric matrix $A$ whose entries are linear forms in $ Q[X_1, \ldots {}, X_k] $ with coefficients of bit size $ \leq \tau $. We provide an algorithm which decides the existence of rational solutions to the linear matrix inequality $ A \geq 0 $ and outputs such a rational solution if it exists. This problem is of first importance: it can be used to compute algebraic certificates of positivity for multivariate polynomials. Our algorithm runs within $ (k < =)^{O(1)} 2^{O(\min (k, D))} D^2 D^{O(D^2)} $ bit operations; the bit size of the output solution is dominated by $ \tau^{O(1)} 2^{O(\min (k, D))} D^2 $. These results are obtained by designing algorithmic variants of constructions introduced by Klep and Schweighofer. This leads to the best complexity bounds for deciding the existence of sums of squares with rational coefficients of a given polynomial. We have implemented the algorithm; it has been able to tackle Scheiderer's example of a multivariate polynomial that is a sum of squares over the reals but not over the rationals; providing the first computer validation of this counter-example to Sturmfels' conjecture.", acknowledgement = ack-nhfb, } @InProceedings{Hulpke:2013:CST, author = "Alexander J. 
Hulpke", title = "Calculation of the subgroups of a trivial-fitting group", crossref = "Monagan:2013:IPI", pages = "205--210", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465525", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We describe an algorithm to determine representatives of the conjugacy classes of subgroups of a Trivial-Fitting group, this case being the one prior algorithms reduce to. As a subtask we describe an algorithm for determining conjugacy classes of complements to an arbitrary normal subgroup if the factor group is solvable.", acknowledgement = ack-nhfb, } @InProceedings{Johansson:2013:FHS, author = "Fredrik Johansson and Manuel Kauers and Marc Mezzarobba", title = "Finding hyperexponential solutions of linear {ODEs} by numerical evaluation", crossref = "Monagan:2013:IPI", pages = "211--218", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465513", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present a new algorithm for computing hyperexponential solutions of linear ordinary differential equations with polynomial coefficients. The algorithm relies on interpreting formal series solutions at the singular points as analytic functions and evaluating them numerically at some common ordinary point. The numerical data is used to determine a small number of combinations of the formal series that may give rise to hyperexponential solutions.", acknowledgement = ack-nhfb, } @InProceedings{Kaltofen:2013:SMF, author = "Erich L. 
Kaltofen and Zhengfeng Yang", title = "Sparse multivariate function recovery from values with noise and outlier errors", crossref = "Monagan:2013:IPI", pages = "219--226", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465524", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Error-correcting decoding is generalized to multivariate sparse rational function recovery from evaluations that can be numerically inaccurate and where several evaluations can have severe errors (``outliers''). The generalization of the Berlekamp-Welch decoder to exact Cauchy interpolation of univariate rational functions from values with faults is by Kaltofen and Pernet in 2012. We give a different univariate solution based on structured linear algebra that yields a stable decoder with floating point arithmetic. Our multivariate polynomial and rational function interpolation algorithm combines Zippel's symbolic sparse polynomial interpolation technique [Ph.D. Thesis MIT 1979] with the numeric algorithm by Kaltofen, Yang, and Zhi [Proc. SNC 2007], and removes outliers (``cleans up data'') through techniques from error correcting codes. Our multivariate algorithm can build a sparse model from a number of evaluations that is linear in the sparsity of the model.", acknowledgement = ack-nhfb, } @InProceedings{Kawano:2013:QFT, author = "Yasuhito Kawano and Hiroshi Sekigawa", title = "{Quantum Fourier Transform} over symmetric groups", crossref = "Monagan:2013:IPI", pages = "227--234", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465940", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This paper proposes an O (n$^4$) quantum Fourier transform (QFT) algorithm over symmetric group S$_n$, the fastest QFT algorithm of its kind. 
We propose a fast Fourier transform algorithm over symmetric group S$_n$, which consists of O (n$^3$) multiplications of unitary matrices, and then transform it into a quantum circuit form. The QFT algorithm can be applied to constructing the standard algorithm of the hidden subgroup problem.", acknowledgement = ack-nhfb, } @InProceedings{Kunwar:2013:SOD, author = "Vijay Jung Kunwar and Mark van Hoeij", title = "Second order differential equations with hypergeometric solutions of degree three", crossref = "Monagan:2013:IPI", pages = "235--242", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465953", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Let L be a second order linear homogeneous differential equation with rational function coefficients. The goal in this paper is to solve L in terms of hypergeometric function 2F1(a,b;c|f) where f is a rational function of degree 3.", acknowledgement = ack-nhfb, } @InProceedings{Lamban:2013:CSM, author = "Laureano Lamb{\'a}n and Francisco J. Mart{\'\i}n-Mateos and Julio Rubio and Jos{\'e}-Luis Ruiz-Reina", title = "Certified symbolic manipulation: bivariate simplicial polynomials", crossref = "Monagan:2013:IPI", pages = "243--250", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465515", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Certified symbolic manipulation is an emerging new field where programs are accompanied by certificates that, suitably interpreted, ensure the correctness of the algorithms. In this paper, we focus on algebraic algorithms implemented in the proof assistant ACL2, which allows us to verify correctness in the same programming environment. The case study is that of bivariate simplicial polynomials, a data structure used to help the proof of properties in Simplicial Topology. 
Simplicial polynomials can be computationally interpreted in two ways. As symbolic expressions, they can be handled algorithmically, increasing the automation in ACL2 proofs. As representations of functional operators, they help proving properties of categorical morphisms. As an application of this second view, we present the definition in ACL2 of some morphisms involved in the Eilenberg-Zilber reduction, a central part of the Kenzo computer algebra system. We have proved the ACL2 implementations are correct and tested that they get the same results as Kenzo does.", acknowledgement = ack-nhfb, } @InProceedings{Lebreton:2013:CSB, author = "Romain Lebreton and Esmaeil Mehrabi and Eric Schost", title = "On the complexity of solving bivariate systems: the case of non-singular solutions", crossref = "Monagan:2013:IPI", pages = "251--258", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465950", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We give an algorithm for solving bivariate polynomial systems over either k (T)[ X,Y ] or Q [ X,Y ] using a combination of lifting and modular composition techniques.", acknowledgement = ack-nhfb, } @InProceedings{Lenstra:2013:LS, author = "Hendrik Lenstra", title = "Lattices with symmetry", crossref = "Monagan:2013:IPI", pages = "3--4", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465929", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", acknowledgement = ack-nhfb, } @InProceedings{Levandovskyy:2013:ECG, author = "Viktor Levandovskyy and Grischa Studzinski and Benjamin Schnitzler", title = "Enhanced computations of {Gr{\"o}bner} bases in free algebras as a new application of the letterplace paradigm", crossref = "Monagan:2013:IPI", pages = "259--266", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465948", bibdate = "Fri Mar 14 14:33:44 
MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Recently, the notion of ``letterplace correspondence'' between ideals in the free associative algebra KX and certain ideals in the so-called letterplace ring KXP has evolved. We continue this research direction, started by La Scala and Levandovskyy, and present novel ideas, supported by the implementation, for effective computations with ideals in the free algebra by utilizing the generalized letterplace correspondance. In particular, we provide a direct algorithm to compute Gr{\"o}bner bases of non-graded ideals. Surprisingly, we realize its behavior as ``homogenizing without a homogenization variable''. Moreover, we develop new shift-invariant data structures for this family of algorithms and discuss about them. Furthermore we generalize the famous criteria of Gebauer-M{\"o}ller to the non-commutative setting and show the benefits for the computation by allowing to skip unnecessary critical pairs. The methods are implemented in the computer algebra system Singular. We present a comparison of performance of our implementation with the corresponding implementations in the systems Magma [BCP97] and GAP [GAP13] on the representative set of nontrivial examples.", acknowledgement = ack-nhfb, } @InProceedings{Levin:2013:MDD, author = "Alexander B. Levin", title = "Multivariate difference-differential dimension polynomials and new invariants of difference-differential field extensions", crossref = "Monagan:2013:IPI", pages = "267--274", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465521", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We introduce a method of characteristic sets with respect to several term orderings for difference-differential polynomials. 
Using this technique, we obtain a method of computation of multivariate dimension polynomials of finitely generated difference-differential field extensions. Furthermore, we find new invariants of such extensions and show how the computation of multivariate difference-differential polynomials is applied to the equivalence problem for systems of algebraic difference-differential equations.", acknowledgement = ack-nhfb, } @InProceedings{Li:2013:SDR, author = "Wei Li and Chun-Ming Yuan and Xiao-Shan Gao", title = "Sparse difference resultant", crossref = "Monagan:2013:IPI", pages = "275--282", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465509", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper, the concept of sparse difference resultant for a Laurent transformally essential system of Laurent difference polynomials is introduced and its properties are proved. In particular, order and degree bounds for the sparse difference resultant are given. Based on these bounds, an algorithm to compute the sparse difference resultant is proposed, which is single exponential in terms of the number of variables, the Jacobi number, and the size of the system. 
Also, the precise order, degree, a determinant representation, and a Poisson-type product formula for the difference resultant are given.", acknowledgement = ack-nhfb, } @InProceedings{Mehlhorn:2013:AFR, author = "Kurt Mehlhorn and Michael Sagraloff and Pengming Wang", title = "From approximate factorization to root isolation", crossref = "Monagan:2013:IPI", pages = "283--290", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465523", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present an algorithm for isolating all roots of an arbitrary complex polynomial $p$ which also works in the presence of multiple roots provided that arbitrary good approximations of the coefficients of $p$ and the number of distinct roots are given. Its output consists of pairwise disjoint disks each containing one of the distinct roots of p, and its multiplicity. The algorithm uses approximate factorization as a subroutine. For the case, where Pan's algorithm [16] is used for the factorization, we derive complexity bounds for the problems of isolating and refining all roots which are stated in terms of the geometric locations of the roots only. Specializing the latter bounds to a polynomial of degree d and with integer coefficients of bitsize less than $ \tau $, we show that $ {\tilde O}(d^3 + d^2 \tau + d \kappa) $ bit operations are sufficient to compute isolating disks of size less than $ 2^- \kappa $ for all roots of p, where $ \kappa $ is an arbitrary positive integer. Our new algorithm has an interesting consequence on the complexity of computing the topology of a real algebraic curve specified as the zero set of a bivariate integer polynomial and for isolating the real solutions of a bivariate system. 
For input polynomials of degree $n$ and bitsize $ \tau $, the currently best running time improves from $ {\tilde O}(n^9 \tau + n^8 \tau^2) $ (deterministic) to $ {\tilde O}(n^6 + n^5 \tau) $ (randomized) for topology computation and from $ {\tilde O}(n^8 + n^7 \tau) $ (deterministic) to $ {\tilde O}(n^6 + n^5 \tau) $ (randomized) for solving bivariate systems.", acknowledgement = ack-nhfb, } @InProceedings{Pan:2013:BCR, author = "Victor Y. Pan and Elias P. Tsigaridas", title = "On the boolean complexity of real root refinement", crossref = "Monagan:2013:IPI", pages = "299--306", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465938", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We assume that a real square-free polynomial $A$ has a degree $d$, a maximum coefficient bitsize $ \tau $ and a real root lying in an isolating interval and having no nonreal roots nearby (we quantify this assumption). Then, we combine the Double Exponential Sieve algorithm (also called the Bisection of the Exponents), the bisection, and Newton iteration to decrease the width of this inclusion interval by a factor of $ t = 2^{-L} $. The algorithm has Boolean complexity $ {\tilde O}_B (d^2 \tau + d L) $. Our algorithms support the same complexity bound for the refinement of $r$ roots, for any $ r \leq d $.", acknowledgement = ack-nhfb, } @InProceedings{Pan:2013:TFA, author = "Senshan Pan and Yupu Hu and Baocang Wang", title = "The termination of the {$ F5 $} algorithm revisited", crossref = "Monagan:2013:IPI", pages = "291--298", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465520", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The F5 algorithm [8] is generally believed as one of the fastest algorithms for computing Gr{\"o}bner bases. However, its termination problem is still unclear. 
The crux lies in the non-determinacy of the F5 in selecting which from the critical pairs of the same degree. In this paper, we construct a generalized algorithm F5GEN which contain the F5 as its concrete implementation. Then we prove the correct termination of the F5GEN algorithm. That is to say, for any finite set of homogeneous polynomials, the F5 terminates correctly.", acknowledgement = ack-nhfb, } @InProceedings{Parrilo:2013:CAG, author = "Pablo A. Parrilo", title = "Convex algebraic geometry and semidefinite optimization", crossref = "Monagan:2013:IPI", pages = "9--10", year = "2013", DOI = "https://doi.org/10.1145/2465506.2466575", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In the past decade there has been a surge of interest in algebraic approaches to optimization problems defined by multivariate polynomials. Fundamental mathematical challenges that arise in this area include understanding the structure of nonnegative polynomials, the interplay between efficiency and complexity of different representations of algebraic sets, and the development of effective algorithms. Remarkably, and perhaps unexpectedly, convexity provides a new viewpoint and a powerful framework for addressing these questions. This naturally brings us to the intersection of algebraic geometry, optimization, and convex geometry, with an emphasis on algorithms and computation. This emerging area has become known as convex algebraic geometry. This tutorial will focus on basic and recent developments in convex algebraic geometry, and the associated computational methods based on semidefinite programming for optimization problems involving polynomial equations and inequalities. There has been much recent progress, by combining theoretical results in real algebraic geometry with semidefinite programming to develop effective computational approaches to these problems. 
We will make particular emphasis on sum of squares decompositions, general duality properties, infeasibility certificates, approximation/inapproximability results, as well as survey the many exciting developments that have taken place in the last few years.", acknowledgement = ack-nhfb, } @InProceedings{Pauderis:2013:CIS, author = "Colton Pauderis and Arne Storjohann", title = "Computing the invariant structure of integer matrices: fast algorithms into practice", crossref = "Monagan:2013:IPI", pages = "307--314", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465955", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "We present a new heuristic algorithm for computing the determinant of a nonsingular $ n \times n $ integer matrix. Extensive empirical results from a highly optimized implementation show the running time grows approximately as $ n^3 \log n $, even for input matrices with a highly nontrivial Smith invariant structure. We extend the algorithm to compute the Hermite form of the input matrix. Both the determinant and Hermite form algorithm certify correctness of the computed results.", acknowledgement = ack-nhfb, } @InProceedings{Pillwein:2013:TCP, author = "Veronika Pillwein", title = "Termination conditions for positivity proving procedures", crossref = "Monagan:2013:IPI", pages = "315--322", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465945", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Proving positivity of a sequence given by a linear recurrence with polynomial coefficients (P-finite recurrence) is a non-trivial task for both humans and computers. Algorithms dealing with this task are rare or non-existent. 
One method that was introduced in the last decade by Gerhold and Kauers succeeds on many examples, but termination of this procedure has been proven so far only up to order three for special cases. Here we present an analysis that extends the previously known termination results on recurrences of order three, and also provides termination conditions for recurrences of higher order.", acknowledgement = ack-nhfb, } @InProceedings{Raab:2013:IUF, author = "Clemens G. Raab", title = "Integration of unspecified functions and families of iterated integrals", crossref = "Monagan:2013:IPI", pages = "323--330", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465939", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "An algorithm for parametric elementary integration over differential fields constructed by a differentially transcendental extension is given. It extends current versions of Risch's algorithm to this setting and is based on some first ideas of Graham H. Campbell transferring his method to more formal grounds and making it parametric, which allows to compute relations among definite integrals. Apart from differentially transcendental functions, such as the gamma function or the zeta function, also unspecified functions and certain families of iterated integrals such as the polylogarithms can be modeled in such differential fields.", acknowledgement = ack-nhfb, } @InProceedings{Steffy:2013:ELI, author = "Daniel E. 
Steffy", title = "Exact linear and integer programming: tutorial abstract", crossref = "Monagan:2013:IPI", pages = "11--12", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465931", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This tutorial surveys state-of-the-art algorithms and computational methods for computing exact solutions to linear and mixed-integer programming problems.", acknowledgement = ack-nhfb, } @InProceedings{vanderHoeven:2013:IMC, author = "Joris van der Hoeven and Gr{\'e}goire Lecerf", title = "Interfacing {{\tt MATHEMAGIX}} with {C++}", crossref = "Monagan:2013:IPI", pages = "363--370", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465511", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper, we give a detailed description of the interface between the MATHEMAGIX language and C++. In particular, we describe the mechanism which allows us to import a C++ template library (which only permits static instantiation) as a fully generic MATHEMAGIX template library.", acknowledgement = ack-nhfb, } @InProceedings{vanderHoeven:2013:SFT, author = "Joris van der Hoeven and Romain Lebreton and {\'E}ric Schost", title = "Structured {FFT} and {TFT}: symmetric and lattice polynomials", crossref = "Monagan:2013:IPI", pages = "355--362", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465526", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper, we consider the problem of efficient computations with structured polynomials. 
We provide complexity results for computing Fourier Transform and Truncated Fourier Transform of symmetric polynomials, and for multiplying polynomials supported on a lattice.", acknowledgement = ack-nhfb, } @InProceedings{vanHoeij:2013:CFU, author = "Mark van Hoeij", title = "The complexity of factoring univariate polynomials over the rationals: tutorial abstract", crossref = "Monagan:2013:IPI", pages = "13--14", year = "2013", DOI = "https://doi.org/10.1145/2465506.2479779", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "This tutorial will explain the algorithm behind the currently fastest implementations for univariate factorization over the rationals. The complexity will be analyzed; it turns out that modifications were needed in order to prove a polynomial time complexity while preserving the best practical performance. The complexity analysis leads to two results: (1) it shows that the practical performance on common inputs can be improved without harming the worst case performance, and (2) it leads to an improved complexity, not only for factoring, but for LLL reduction as well.", acknowledgement = ack-nhfb, } @InProceedings{Wolfram:2013:CAY, author = "Stephen Wolfram", title = "Computer algebra: a $ 32 $-year update", crossref = "Monagan:2013:IPI", pages = "7--8", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465930", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", acknowledgement = ack-nhfb, } @InProceedings{Wu:2013:FPR, author = "Wenyuan Wu and Greg Reid", title = "Finding points on real solution components and applications to differential polynomial systems", crossref = "Monagan:2013:IPI", pages = "339--346", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465954", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; 
http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper we extend complex homotopy methods to finding witness points on the irreducible components of real varieties. In particular we construct such witness points as the isolated real solutions of a constrained optimization problem. First a random hyperplane characterized by its random normal vector is chosen. Witness points are computed by a polyhedral homotopy method. Some of them are at the intersection of this hyperplane with the components. Other witness points are the local critical points of the distance from the plane to components. A method is also given for constructing regular witness points on components, when the critical points are singular. The method is applicable to systems satisfying certain regularity conditions. Illustrative examples are given. We show that the method can be used in the consistent initialization phase of a popular method due to Pryce and Pantelides for preprocessing differential algebraic equations for numerical solution.", acknowledgement = ack-nhfb, } @InProceedings{Yang:2013:VEB, author = "Zhengfeng Yang and Lihong Zhi and Yijun Zhu", title = "Verified error bounds for real solutions of positive-dimensional polynomial systems", crossref = "Monagan:2013:IPI", pages = "371--378", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465951", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "In this paper, we propose two algorithms for verifying the existence of real solutions of positive-dimensional polynomial systems. The first one is based on the critical point method and the homotopy continuation method. It targets for verifying the existence of real roots on each connected component of an algebraic variety $V \cap R^n$ defined by polynomial equations. 
The second one is based on the low-rank moment matrix completion method and aims for verifying the existence of at least one real roots on $V \cap R^n$. Combined both algorithms with the verification algorithms for zero-dimensional polynomial systems, we are able to find verified real solutions of positive-dimensional polynomial systems very efficiently for a large set of examples.", acknowledgement = ack-nhfb, } @InProceedings{Zhou:2013:CCB, author = "Wei Zhou and George Labahn", title = "Computing column bases of polynomial matrices", crossref = "Monagan:2013:IPI", pages = "379--386", year = "2013", DOI = "https://doi.org/10.1145/2465506.2465947", bibdate = "Fri Mar 14 14:33:44 MDT 2014", bibsource = "http://portal.acm.org/; http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "Given a matrix of univariate polynomials over a field $K$, its columns generate a $ K[x] $-module. We call any basis of this module a column basis of the given matrix. Matrix gcds and matrix normal forms are examples of such module bases. In this paper we present a deterministic algorithm for the computation of a column basis of an $ m \times n $ input matrix with $ m \leq n $. If $s$ is the average column degree of the input matrix, this algorithm computes a column basis with a cost of $ {\tilde O}(n m^{\omega - 1} s) $ field operations in $K$. Here the soft-$O$ notation is Big-$O$ with log factors removed while $ \omega $ is the exponent of matrix multiplication. Note that the average column degree $s$ is bounded by the commonly used matrix degree that is also the maximum column degree of the input matrix.", acknowledgement = ack-nhfb, } %%% ==================================================================== %%% Cross-referenced entries must come last: @Proceedings{Jenks:1976:SPA, editor = "Richard D. 
Jenks", booktitle = "{Symsac '76: proceedings of the 1976 ACM Symposium on Symbolic and Algebraic Computation, August 10--12, 1976, Yorktown Heights, New York}", title = "{Symsac '76: proceedings of the 1976 ACM Symposium on Symbolic and Algebraic Computation, August 10--12, 1976, Yorktown Heights, New York}", publisher = pub-ACM, address = pub-ACM:adr, pages = "384", year = "1976", LCCN = "QA155.7.E4 A15 1976", bibdate = "Tue Jul 26 09:04:45 1994", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", price = "US\$20.00", acknowledgement = ack-nhfb, xxISBN = "none", } @Proceedings{Ng:1979:SAC, editor = "Edward W. Ng", booktitle = "{Symbolic and algebraic computation: EUROSAM '79, an International Symposium on Symbolic and Algebraic Manipulation, Marseille, France, June 1979}", title = "{Symbolic and algebraic computation: EUROSAM '79, an International Symposium on Symbolic and Algebraic Manipulation, Marseille, France, June 1979}", volume = "72", publisher = pub-SV, address = pub-SV:adr, pages = "xiv + 557", year = "1979", CODEN = "LNCSD9", ISBN = "0-387-09519-5", ISBN-13 = "978-0-387-09519-6", ISSN = "0302-9743 (print), 1611-3349 (electronic)", LCCN = "QA155.7.E4 E88 1979", bibdate = "Fri Apr 12 07:14:47 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", series = ser-LNCS, acknowledgement = ack-nhfb, keywords = "algebra --- data processing --- congresses", } @Proceedings{Wang:1981:SPA, editor = "Paul S. 
Wang", booktitle = "{SYMSAC '81: proceedings of the 1981 ACM Symposium on Symbolic and Algebraic Computation, Snowbird, Utah, August 5--7, 1981}", title = "{SYMSAC '81: proceedings of the 1981 ACM Symposium on Symbolic and Algebraic Computation, Snowbird, Utah, August 5--7, 1981}", publisher = pub-ACM, address = pub-ACM:adr, pages = "xi + 249", year = "1981", ISBN = "0-89791-047-8", ISBN-13 = "978-0-89791-047-7", LCCN = "QA155.7.E4 A28 1981", bibdate = "Fri Feb 09 12:29:36 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/macsyma.bib; http://www.math.utah.edu/pub/tex/bib/sigsam.bib", note = "ACM order no. 505810", price = "US\$23.00", acknowledgement = ack-nhfb, tableofcontents = "The basis of a computer system for modern algebra / John J. Cannon \\ A language for computational algebra / Richard D. Jenks, Barry M. Trager \\ Characterization of VAX Macsyma / John K. Foderaro, Richard J. Fateman \\ SMP - A Symbolic Manipulation Program / Chris A. Cole, Stephen Wolfram \\ An extension of Liouville's theorem on integration in finite terms / M. F. Singer, B. D. Saunders, B. F. Caviness \\ Formal solutions of differential equations in the neighborhood of singular points (Regular and Irregular) / J. Della Dora, E. Tournier \\ Elementary first integrals of differential equations / M. J. Prelle, M. F. Singer \\ A technique for solving ordinary differential equations using Riemann's P-functions / Shunro Watanabe \\ Using Lie transformation groups to find closed form solutions to first order ordinary differential equations / Bruce Char \\ The computational complexity of continued fractions / V. Strassen \\ Newton's iteration and the sparse Hensel algorithm (Extended Abstract) / Richard Zippel \\ Automatic generation of finite difference equations and Fourier stability analyses / Michael C. Wirth \\ An algorithmic classification of geometries in general relativity / Jan E. 
Aman, Anders Karlhede \\ Formulation of design rules for NMR imaging coil by using symbolic manipulation / John F. Schenck, M. A. Hussain \\ Computation for conductance distributions of percolation lattice cells / Rabbe Fogelholm \\ Breuer's grow factor algorithm in computer algebra / J. A. van Hulzen \\ An implementation of Kovacic's algorithm for solving second order linear homogeneous differential equations / B. David Saunders \\ Implementing a polynomial factorization and GCD package / P. M. A. Moore, A. C. Norman \\ Note on probabilistic algorithms in integer and polynomial arithmetic / Michael Kaminski \\ A case study in interlanguage communication: Fast LISP polynomial operations written in C / Richard J. Fateman \\ On the application of Array Processors to symbol manipulation / R. Beardsworth \\ The optimization of user programs for an Algebraic Manipulation System / P. D. Pearce, R. J. Hicks \\ Views on transportability of Lisp and Lisp-based systems / Richard J. Fateman \\ Algebraic constructions for algorithms (Extended Abstract) / S. Winograd \\ A cancellation free algorithm, with factoring capabilities, for the efficient solution of large sparse sets of equations / J. Smit \\ Efficient Gaussian elimination method for symbolic determinants and linear systems (Extended Abstract) / Tateaki Sasaki, Hirokazu Murao \\ Parallelism in algebraic computation and parallel algorithms for symbolic linear systems / Tateaki Sasaki, Yasumasa Kanada \\ Algebraic computation for the masses / Joel Moses \\ Construction of nilpotent Lie algebras over arbitrary fields / Robert E. Beck, Bernard Kolman \\ Algorithms for central extensions of Lie algebras / Robert E. Beck, Bernard Kolman \\ Computing an invariant subring of $k[X,Y]$ / Rosalind Neuman \\ Double cosets and searching small groups / Gregory Butler \\ A generalized class of polynomials that are hard to factor / Erich Kaltofen, David R. Musser, B. 
David Saunders \\ Some inequalities about univariate polynomials / Maurice Mignotte \\ Factorization over finitely generated fields / James H. Davenport, Barry M. Trager \\ On solving systems of algebraic equations via ideal bases and elimination theory / Michael E. Pohst, David Y. Y. Yun \\ A p-adic algorithm for univariate partial fractions / Paul S. Wang \\ Use of VLSI in algebraic computation: Some suggestions / H. T. Kung \\ An algebraic front-end for the production and use of numeric programs / Douglas H. Lanam \\ Computer algebra and numerical integration / Richard J. Fateman \\ Tracing occurrences of patterns in symbolic computations / F. Gardin, J. A. Campbell \\ The automatic derivation of periodic solutions to a class of weakly nonlinear differential equations / John Fitch \\ User-based integration software / John Fitch.", } @Proceedings{Char:1986:PSS, editor = "Bruce W. Char", booktitle = "{Proceedings of the 1986 Symposium on Symbolic and Algebraic Computation: Symsac '86, July 21--23, 1986, Waterloo, Ontario}", title = "{Proceedings of the 1986 Symposium on Symbolic and Algebraic Computation: Symsac '86, July 21--23, 1986, Waterloo, Ontario}", publisher = pub-ACM, address = pub-ACM:adr, pages = "254", year = "1986", ISBN = "0-89791-199-7 (paperback)", ISBN-13 = "978-0-89791-199-3 (paperback)", LCCN = "QA155.7.E4 A281 1986", bibdate = "Thu Mar 12 07:35:00 MST 1998", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", note = "ACM order no. 505860.", acknowledgement = ack-nhfb, keywords = "Algebra --- Data processing --- Congresses; Programming languages (Electronic computers) --- Congresses", } @Proceedings{Gianni:1989:SAC, editor = "P. 
(Patrizia) Gianni", booktitle = "{Symbolic and algebraic computation: International Symposium ISSAC '88, Rome, Italy, July 4--8, 1988: proceedings}", title = "{Symbolic and algebraic computation: International Symposium ISSAC '88, Rome, Italy, July 4--8, 1988: proceedings}", volume = "358", publisher = pub-SV, address = pub-SV:adr, pages = "xi + 543", year = "1989", ISBN = "3-540-51084-2", ISBN-13 = "978-3-540-51084-0", LCCN = "QA76.95 .I571 1988", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", note = "Conference held jointly with AAECC-6.", series = ser-LNCS, abstract = "The following topics were dealt with: differential algebra; applications; Gr{\"o}bner bases; differential equations; algorithmic number theory; algebraic geometry; computational geometry; computational logic; systems; and arithmetic.", acknowledgement = ack-nhfb, classification = "C1110 (Algebra); C4100 (Numerical analysis); C7310 (Mathematics)", confdate = "4--8 July 1988", conflocation = "Rome, Italy", keywords = "Differential algebra; Applications; Gr{\"o}bner bases; Differential equations; Algorithmic number theory; Algebraic geometry; Computational geometry; Computational logic; Systems; Arithmetic", pubcountry = "West Germany", thesaurus = "Algebra; Computational geometry; Differential equations; Formal logic; Mathematics computing; Theorem proving", } @Proceedings{Gonnet:1989:PAI, editor = "Gaston H. 
Gonnet", booktitle = "{Proceedings of the ACM-SIGSAM 1989 International Symposium on Symbolic and Algebraic Computation: ISSAC '89 / July 17--19, 1989, Portland, Oregon}", title = "{Proceedings of the ACM-SIGSAM 1989 International Symposium on Symbolic and Algebraic Computation: ISSAC '89 / July 17--19, 1989, Portland, Oregon}", publisher = pub-ACM, address = pub-ACM:adr, pages = "399", year = "1989", ISBN = "0-89791-325-6", ISBN-13 = "978-0-89791-325-6", LCCN = "QA76.95.I59 1989", bibdate = "Thu Sep 26 06:21:35 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", note = "ACM order number: 505890. English and French.", price = "US\$29.00", abstract = "The following topics were dealt with: differential equations; linear difference equations; functional equivalence; series solutions; factorization; Las Vegas primality test; matrix algebra; rational mappings; Knuth--Bendix procedure and Buchberger algorithm; symbolic algebra; lookup tables; derivations; polynomials; Pad{\'e}--Hermite Forms; $p$-adic approximations; nonlinear equations; defect; Sturm--Habicht sequence; MINION; REDUCE; code optimization; IRENA; MACSYMA; GENCRAY; AIPI; Fourier series; functions; integration; education; stability; normal forms; curves; geometry; root isolation; triangle inequalities; parallel algorithms; rewriting systems; and theorem proving.", acknowledgement = ack-nhfb, classification = "C1110 (Algebra); C1120 (Analysis); C4100 (Numerical analysis); C4200 (Computer theory); C7310 (Mathematics)", confdate = "17--19 July 1989", conflocation = "Portland, OR, USA", confsponsor = "ACM", keywords = "AIPI; algebra --- data processing --- congresses; Buchberger algorithm; Code optimization; computational complexity --- congresses; Curves; Defect; Derivations; Differential equations; Education; Factorization; Fourier series; Functional equivalence; Functions; GENCRAY; Geometry; Integration; IRENA; Knuth--Bendix procedure; Las Vegas primality test; Linear difference equations; 
Lookup tables; MACSYMA; Matrix algebra; MINION; Nonlinear equations; Normal forms; P-adic approximations; Pad{\'e}--Hermite Forms; Parallel algorithms; Polynomials; Rational mappings; REDUCE; Rewriting systems; Root isolation; Series solutions; Stability; Sturm--Habicht sequence; Symbolic algebra; Theorem proving, mathematics --- data processing --- congresses; Triangle inequalities", pubcountry = "USA", thesaurus = "Algebra; Computation theory; Functions; Mathematics computing; Numerical analysis; Series [mathematics]; Symbol manipulation", } @Proceedings{Mora:1989:AAA, editor = "T. Mora", booktitle = "{Applied Algebra, Algebraic Algorithms and Error-Correcting Codes. 6th International Conference, AAECC-6, Rome, Italy, July 4--8, 1988. Proceedings}", title = "{Applied Algebra, Algebraic Algorithms and Error-Correcting Codes. 6th International Conference, AAECC-6, Rome, Italy, July 4--8, 1988. Proceedings}", volume = "357", publisher = pub-SV, address = pub-SV:adr, pages = "ix + 480", year = "1989", ISBN = "3-540-51083-4", ISBN-13 = "978-3-540-51083-3", LCCN = "QA268 .A35 1988", bibdate = "Tue Sep 17 06:46:18 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", note = "Conference held jointly with ISSAC '88.", series = ser-LNCS, acknowledgement = ack-nhfb, confdate = "4--8 July 1988", conflocation = "Rome, Italy", pubcountry = "West Germany", } @Proceedings{Watanabe:1990:IPI, editor = "Shunro Watanabe and Morio Nagata", booktitle = "{ISSAC '90: proceedings of the International Symposium on Symbolic and Algebraic Computation: August 20--24, 1990, Tokyo, Japan}", title = "{ISSAC '90: proceedings of the International Symposium on Symbolic and Algebraic Computation: August 20--24, 1990, Tokyo, Japan}", publisher = pub-ACM # " and " # pub-AW, address = pub-ACM:adr # " and " # pub-AW:adr, pages = "ix + 307", year = "1990", ISBN = "0-89791-401-5 (ACM), 0-201-54892-5 (Addison-Wesley)", ISBN-13 = "978-0-89791-401-7 (ACM), 
978-0-201-54892-1 (Addison-Wesley)", LCCN = "QA76.95 .I57 1990", bibdate = "Thu Sep 26 06:00:06 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", abstract = "The following topics were dealt with: foundations of symbolic computation; computational logics; systems; algorithms on polynomials; integration and differential equations; and algorithms on geometry.", acknowledgement = ack-nhfb, classification = "C4210 (Formal logic); C4240 (Programming and algorithm theory)", confdate = "20--24 Aug. 1990", conflocation = "Tokyo, Japan", confsponsor = "Inf. Processing Soc. Japan; Japan Soc. Software Sci. Technol.; ACM", keywords = "algebra --- data processing --- congresses; Algorithms; Computational geometry; Computational logics; Differential equations; Geometry; Integration; mathematics --- data processing --- congresses; Polynomials; Symbolic computation; Systems", pubcountry = "USA", thesaurus = "Algorithm theory; Computational geometry; Formal logic; Symbol manipulation", } @Proceedings{Watt:1991:IPI, editor = "Stephen M. 
Watt", booktitle = "{ISSAC '91: proceedings of the 1991 International Symposium on Symbolic and Algebraic Computation, July 15--17, 1991, Bonn, Germany}", title = "{ISSAC '91: proceedings of the 1991 International Symposium on Symbolic and Algebraic Computation, July 15--17, 1991, Bonn, Germany}", publisher = pub-ACM, address = pub-ACM:adr, pages = "xiii + 468", year = "1991", ISBN = "0-89791-437-6", ISBN-13 = "978-0-89791-437-6", LCCN = "QA 76.95 I59 1991", bibdate = "Thu Sep 26 06:00:06 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/magma.bib", abstract = "The following topics were dealt with: algorithms for symbolic mathematical computation; languages, systems and packages; computational geometry, group theory and number theory; automatic theorem proving and programming; interface of symbolics, numerics and graphics; applications in mathematics, science and engineering; and symbolic and algebraic computation in education.", acknowledgement = ack-nhfb, classification = "C1160 (Combinatorial mathematics); C4130 (Interpolation and function approximation); C4210 (Formal logic); C4240 (Programming and algorithm theory); C7310 (Mathematics)", confdate = "15--17 July 1991", conflocation = "Bonn, Germany", confsponsor = "ACM", keywords = "algebra --- data processing --- congresses; Algebraic computation; Algorithms; Automatic theorem proving; Computational geometry; Education; Engineering; Graphics; Group theory; Languages; Mathematics; mathematics --- data processing --- congresses; Number theory; Programming; Science; Symbolic mathematical computation; Symbolics", pubcountry = "USA", thesaurus = "Computational complexity; Formal languages; Interpolation; Number theory; Polynomials; Symbol manipulation", } @Proceedings{Wang:1992:PII, editor = "Paul S. Wang", booktitle = "{Proceedings of ISSAC '92. International Symposium on Symbolic and Algebraic Computation}", title = "{Proceedings of ISSAC '92. 
International Symposium on Symbolic and Algebraic Computation}", publisher = pub-ACM, address = pub-ACM:adr, pages = "ix + 406", year = "1992", ISBN = "0-89791-489-9 (soft cover), 0-89791-490-2 (hard cover)", ISBN-13 = "978-0-89791-489-5 (soft cover), 978-0-89791-490-1 (hard cover)", LCCN = "QA76.95.I59 1992", bibdate = "Thu Sep 26 05:51:45 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/fparith.bib; http://www.math.utah.edu/pub/tex/bib/issac.bib", note = "ACM order number: 505920.", abstract = "The following topics were dealt with: symbolic computation; differential equations; differs-integral software; algebraic algorithms; algebraic software; real algebraics and root isolation; groups and number theory; systems and interfaces.", acknowledgement = ack-nhfb, classification = "C6130 (Data handling techniques); C7310 (Mathematics)", confdate = "27--29 July 1992", conflocation = "Berkeley, CA, USA", confsponsor = "ACM", keywords = "Algebraic algorithms; Algebraic software; Differential equations; Differs-integral software; Groups theory; Interfaces; Number theory; Real algebraics; Root isolation; Symbolic computation", pubcountry = "USA", thesaurus = "Differential equations; Mathematics computing; Symbol manipulation", } @Proceedings{ACM:1993:PFA, editor = "{ACM}", booktitle = "{Proceedings of the Fourth ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, PPOPP: San Diego, California, May 19--22, 1993}", title = "{Proceedings of the Fourth ACM SIGPLAN Symposium on Principles and Practice of Parallel Programming, PPOPP: San Diego, California, May 19--22, 1993}", volume = "28(7)", publisher = pub-ACM, address = pub-ACM:adr, pages = "ix + 259", year = "1993", ISBN = "0-89791-589-5", ISBN-13 = "978-0-89791-589-2", ISSN = "0362-1340 (print), 1523-2867 (print), 1558-1160 (electronic)", ISSN-L = "0362-1340", LCCN = "QA76.642.A27 1993", bibdate = "Thu Mar 12 11:28:58 MST 1998", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", 
series = "ACM SIGPLAN Notices", acknowledgement = ack-nhfb, sponsor = "Association for Computing Machinery; Special Interest Group on Programming Languages.", standardno = "1", } @Proceedings{Bronstein:1993:IPI, editor = "Manuel Bronstein", booktitle = "{ISSAC'93: proceedings of the 1993 International Symposium on Symbolic and Algebraic Computation, July 6--8, 1993, Kiev, Ukraine}", title = "{ISSAC'93: proceedings of the 1993 International Symposium on Symbolic and Algebraic Computation, July 6--8, 1993, Kiev, Ukraine}", publisher = pub-ACM, address = pub-ACM:adr, pages = "viii + 321", year = "1993", ISBN = "0-89791-604-2", ISBN-13 = "978-0-89791-604-2", LCCN = "QA 76.95 I59 1993", bibdate = "Thu Sep 26 05:45:15 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", note = "ACM order number: 505930.", abstract = "The following topics were dealt with: algebraic solutions of equations; computer algebra systems; algorithm theory and complexity; automated theorem proving; polynomials; and matrix algebra.", acknowledgement = ack-nhfb, classification = "C4210 (Formal logic); C4240 (Programming and algorithm theory); C7310 (Mathematics computing)", confdate = "6--8 July 1993", conflocation = "Kiev, Ukraine", confsponsor = "ACM", keywords = "algebra --- data processing --- congresses; Algorithm theory; Automated theorem proving; Complexity; Computer algebra; mathematics --- data processing --- congresses; Matrix algebra; Polynomials", pubcountry = "USA", source = "ISSAC '93", sponsor = "Association for Computing Machinery.", thesaurus = "Computational complexity; Mathematics computing; Matrix algebra; Polynomials; Symbol manipulation; Theorem proving", } @Proceedings{Halstead:1993:PSC, editor = "Robert H. 
Halstead and Takayasu Ito", booktitle = "{Parallel symbolic computing: languages, systems, and applications: US\slash Japan workshop, Cambridge, MA, USA, October 14--17, 1992: proceedings}", title = "{Parallel symbolic computing: languages, systems, and applications: US\slash Japan workshop, Cambridge, MA, USA, October 14--17, 1992: proceedings}", number = "748", publisher = pub-SV, address = pub-SV:adr, pages = "x + 417", year = "1993", ISBN = "0-387-57396-8, 3-540-57396-8", ISBN-13 = "978-0-387-57396-0, 978-3-540-57396-8", ISSN = "0302-9743 (print), 1611-3349 (electronic)", LCCN = "QA76.58.P3785 1993", bibdate = "Thu Mar 12 11:28:58 MST 1998", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", series = ser-LNCS, acknowledgement = ack-nhfb, } @Proceedings{Sincovec:1993:PSS, editor = "Richard F. Sincovec", booktitle = "{Proceedings of the Sixth SIAM Conference on Parallel Processing for Scientific Computing, Norfolk, VA, March, 1993}", title = "{Proceedings of the Sixth SIAM Conference on Parallel Processing for Scientific Computing, Norfolk, VA, March, 1993}", publisher = pub-SIAM, address = pub-SIAM:adr, pages = "xix + 1041 + iv", year = "1993", ISBN = "0-89871-315-3", ISBN-13 = "978-0-89871-315-2", LCCN = "QA76.58.S55 1993", bibdate = "Thu Mar 12 11:28:58 MST 1998", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", note = "Two volumes.", acknowledgement = ack-nhfb, sponsor = "Society for Industrial and Applied Mathematics.", } @Proceedings{ACM:1994:IPI, editor = "{ACM}", booktitle = "{ISSAC '94: Proceedings of the 1994 International Symposium on Symbolic and Algebraic Computation: July 20--22, 1994, Oxford, England, United Kingdom}", title = "{ISSAC '94: Proceedings of the 1994 International Symposium on Symbolic and Algebraic Computation: July 20--22, 1994, Oxford, England, United Kingdom}", publisher = pub-ACM, address = pub-ACM:adr, pages = "ix + 359", year = "1994", ISBN = "0-89791-638-7", ISBN-13 = "978-0-89791-638-7", LCCN = 
"QA76.95.I59 1994", bibdate = "Thu Sep 26 05:45:15 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", acknowledgement = ack-nhfb, confdate = "20--22 July 1994", conflocation = "Oxford, UK", confsponsor = "ACM", pubcountry = "USA", } @Proceedings{Adleman:1994:ANT, editor = "L. M. Adleman and M.-D. Huang", booktitle = "{Algorithmic Number Theory. First International Symposium, ANTS-I. Proceedings}", title = "{Algorithmic Number Theory. First International Symposium, ANTS-I. Proceedings}", publisher = pub-SV, address = pub-SV:adr, pages = "ix + 322", year = "1994", ISBN = "0-387-58691-1 (New York), 3-540-58691-1 (Berlin)", ISBN-13 = "978-0-387-58691-5 (New York), 978-3-540-58691-3 (Berlin)", LCCN = "QA241.A43 1994", bibdate = "Thu Sep 26 05:50:11 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", acknowledgement = ack-nhfb, confdate = "6--9 May 1994", conflocation = "Ithaca, NY, USA", pubcountry = "Germany", } @Proceedings{Hong:1994:FIS, editor = "Hoon Hong", booktitle = "{First International Symposium on Parallel Symbolic Computation, PASCO '94, Hagenberg\slash Linz, Austria, September 26--28, 1994}", title = "{First International Symposium on Parallel Symbolic Computation, PASCO '94, Hagenberg\slash Linz, Austria, September 26--28, 1994}", volume = "5", publisher = pub-WORLD-SCI, address = pub-WORLD-SCI:adr, pages = "xiii + 431", year = "1994", ISBN = "981-02-2040-5", ISBN-13 = "978-981-02-2040-2", LCCN = "QA76.642.I58 1994", bibdate = "Thu Mar 12 07:55:38 MST 1998", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", series = "Lecture notes series in computing", acknowledgement = ack-nhfb, alttitle = "Parallel symbolic computation", keywords = "Parallel programming (Computer science) --- Congresses.", } @Proceedings{Aityan:1995:PNP, editor = "S. K. 
Aityan", booktitle = "{Proceedings of neural, parallel and scientific computations: proceedings of the First International Conference on Neural, Parallel and Scientific Computations held at Morehouse College, Atlanta, USA, May 28--31, 1995}", title = "{Proceedings of neural, parallel and scientific computations: proceedings of the First International Conference on Neural, Parallel and Scientific Computations held at Morehouse College, Atlanta, USA, May 28--31, 1995}", volume = "1", publisher = "Dynamic Publishers, Inc", address = "Atlanta, GA", pages = "xi + 506", year = "1995", ISBN = "0-9640398-9-3, 0-9640398-8-5", ISBN-13 = "978-0-9640398-9-6, 978-0-9640398-8-9", LCCN = "QA76.87 .I58 1995", bibdate = "Sat Mar 11 16:48:03 2000", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", series = "Proceedings of Neural Parallel and Scientific Computations", acknowledgement = ack-nhfb, } @Proceedings{Ferreira:1995:PAI, editor = "Afonso Ferreira and Jose D. P. Rolim", booktitle = "{Parallel algorithms for irregularly structured problems: second international workshop, IRREGULAR 95, Lyon, France, September 4--6, 1995: proceedings}", title = "{Parallel algorithms for irregularly structured problems: second international workshop, IRREGULAR 95, Lyon, France, September 4--6, 1995: proceedings}", volume = "980", publisher = pub-SV, address = pub-SV:adr, pages = "x + 409", year = "1995", CODEN = "LNCSD9", ISBN = "3-540-60321-2", ISBN-13 = "978-3-540-60321-4", ISSN = "0302-9743 (print), 1611-3349 (electronic)", LCCN = "QA76.642 .I59 1995", bibdate = "Fri Apr 12 07:41:32 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", series = ser-LNCS, acknowledgement = ack-nhfb, keywords = "computer algorithms --- congresses; parallel programming (computer science) --- congresses", xxvolume = "4005092982", } @Proceedings{IEEE:1995:PEI, editor = "{IEEE}", booktitle = "{Proceedings of the Eighth IEEE Symposium on Computer-Based Medical Systems / June 9--10, 1995, 
Lubbock, Texas}", title = "{Proceedings of the Eighth IEEE Symposium on Computer-Based Medical Systems / June 9--10, 1995, Lubbock, Texas}", publisher = pub-IEEE, address = pub-IEEE:adr, pages = "x + 348", year = "1995", ISBN = "0-8186-7117-3", ISBN-13 = "978-0-8186-7117-3", LCCN = "R858.A2 I155 1995", bibdate = "Thu Sep 26 05:45:15 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", note = "IEEE catalog number 95CH35813.", acknowledgement = ack-nhfb, confdate = "9--10 June 1995", conflocation = "Lubbock, TX, USA", confsponsor = "IEEE Comput. Soc. Tech. Committee on Comput. Med.; IEEE South Plains Sect.; SPIE - Int. Soc. Opt. Eng.; Texas Tech Univ.; Texas Tech Univ. Health Sci. Center", pubcountry = "USA", } @Proceedings{Levelt:1995:IPI, editor = "A. H. M. Levelt", booktitle = "{ISSAC '95: Proceedings of the 1995 International Symposium on Symbolic and Algebraic Computation: July 10--12, 1995, Montr{\'e}al, Canada}", title = "{ISSAC '95: Proceedings of the 1995 International Symposium on Symbolic and Algebraic Computation: July 10--12, 1995, Montr{\'e}al, Canada}", publisher = pub-ACM, address = pub-ACM:adr, pages = "xviii + 314", year = "1995", ISBN = "0-89791-699-9", ISBN-13 = "978-0-89791-699-8", LCCN = "QA 76.95 I59 1995", bibdate = "Thu Sep 26 05:34:21 MDT 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", note = "ACM order number: 505950", series = "ISSAC -PROCEEDINGS- 1995", abstract = "The following topics were dealt with: differential equations; visualisation; algebraic numbers; algorithms; systems; polynomial and differential algebra; seminumerical methods; greatest common divisors; and.", acknowledgement = ack-nhfb, classification = "C4100 (Numerical analysis); C4170 (Differential equations); C7310 (Mathematics computing)", confdate = "10--12 July 1995", conflocation = "Montr{\'e}al, Que., Canada", confsponsor = "ACM", keywords = "algebra --- data processing --- congresses; Algebraic numbers; Algorithms; 
Differential algebra; Differential equations; Greatest common divisors; mathematics --- data processing --- congresses; Polynomial; Seminumerical methods; Systems; Visualisation", pubcountry = "USA", source = "ISSAC '95", thesaurus = "Data visualisation; Differential equations; Group theory; Numerical analysis; Symbol manipulation", } @Proceedings{Briot:1996:OBP, editor = "Jean-Pierre Briot and Jean-Marc Geib and Akinori Yonezawa", booktitle = "{Object-based parallel and distributed computation: France--Japan Workshop, OBPDC '95, Tokyo, Japan, June 21--23, 1995: selected papers}", title = "{Object-based parallel and distributed computation: France--Japan Workshop, OBPDC '95, Tokyo, Japan, June 21--23, 1995: selected papers}", volume = "1107", publisher = pub-SV, address = pub-SV:adr, pages = "x + 348", year = "1996", ISBN = "3-540-61487-7 (softcover)", ISBN-13 = "978-3-540-61487-6 (softcover)", ISSN = "0302-9743 (print), 1611-3349 (electronic)", LCCN = "QA76.64 .F7 1995", bibdate = "Sat Dec 21 16:06:37 MST 1996", bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib", series = ser-LNCS, acknowledgement = ack-nhfb, annote = "Data parallel programming in the parallel object-oriented language OCore / Hiroki Konaka \ldots{} [et al.] -- Polymorphic matrices in Paladin / Frederic Guidec and Jean-Marc Jezequel -- Programming and debugging for massive parallelism: the case for a parallel object-oriented language A-NETL / Takanobu Baba, Tsutomu Yoshinaga, and Takahiro Furuta -- Schematic: a concurrent object-oriented extension to Scheme / Kenjiro Taura and Akinori Yonezawa -- (Thread and object)-oriented distributed programming / Jean-Marc Geib \ldots{} [et al.] -- Distributed and object oriented symbolic programming in April / Keith L. Clark and Frank G. 
McCabe -- Reactive programming Eiffel// / Denis Caromel and Yves Roudier -- Proofs, concurrent objects, and computations in a FILL framework / Didier Galmiche and Eric Boudinet -- Modular description and verification of concurrent objects / Jean-Paul Bahsoun, Stephan Merz, and Corinne Servieres -- CHORUS/COOL: CHORUS object oriented technology / Christian Jacquemot, Peter Strarup Jensen, and Stephane Carrez -- Adaptive operating system design using reflection / Rodger Lea, Yasuhiko Yokote, and Jun-ichiro Itoh -- Isatis: a customizable distributed object-based runtime system / Michel Ban{\^a}tre \ldots{} [et al.] -- Lessons from designing and implementing GARF / Rachid Guerraoui, Benoit Garbinato, and Karim Mazouni -- Design and implementation of DROL runtime environment on real-time Mach kernel / Kazunori Takashio, Hidehisa Shitomi, and Mario Tokoro -- ActNet: the actor model applied to mobile robotic environments / Philippe Darche, Pierre-Guillaume Raverdy, and Eric Commelin -- Component-based programming and application management with Olan / Luc Bellissard \ldots{} [et al.] -- The version management architecture of an object-oriented distributed systems environment: OZ++ / Michiharu Tsukamoto \ldots{} [et al.] 
-- Formal semantics of agent evolution in language Flage / Yasuyuki Tahara \ldots{} [et al.].",
  keywords = "Electronic data processing -- Distributed processing; Object-oriented programming (Computer science); Parallel processing (Electronic computers)",
}

@Proceedings{Calmet:1996:DIS,
  editor = "Jacques Calmet and Carla Limongelli",
  booktitle = "{Design and implementation of symbolic computation systems: International Symposium, DISCO '96, Karlsruhe, Germany, September 18--20, 1996: proceedings}",
  title = "{Design and implementation of symbolic computation systems: International Symposium, DISCO '96, Karlsruhe, Germany, September 18--20, 1996: proceedings}",
  volume = "1128",
  publisher = pub-SV,
  address = pub-SV:adr,
  pages = "ix + 356",
  year = "1996",
  ISBN = "3-540-61697-7 (softcover)",
  ISBN-13 = "978-3-540-61697-9 (softcover)",
  ISSN = "0302-9743 (print), 1611-3349 (electronic)",
  LCCN = "QA76.9.S88I576 1996",
  bibdate = "Thu Mar 12 12:25:22 1998",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  series = ser-LNCS,
  acknowledgement = ack-nhfb,
  keywords = "Automatic theorem proving --- Congresses.; Mathematics --- Data processing --- Congresses.; System design --- Congresses.",
}

@Proceedings{LakshmanYN:1996:IPI,
  editor = "{Lakshman Y. N.}",
  booktitle = "{ISSAC '96: Proceedings of the 1996 International Symposium on Symbolic and Algebraic Computation, July 24--26, 1996, Zurich, Switzerland}",
  title = "{ISSAC '96: Proceedings of the 1996 International Symposium on Symbolic and Algebraic Computation, July 24--26, 1996, Zurich, Switzerland}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "xvii + 313",
  year = "1996",
  ISBN = "0-89791-796-0",
  ISBN-13 = "978-0-89791-796-4",
  LCCN = "QA 76.95 I59 1996",
  bibdate = "Thu Mar 12 08:00:14 1998",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  acknowledgement = ack-nhfb,
  sponsor = "ACM; Special Interest Group in Symbolic and Algebraic Manipulation (SIGSAM). ACM; Special Interest Group on Numerical Mathematics (SIGNUM).",
}

@Proceedings{Baral:1997:LPN,
  editor = "C. Baral and V. S. Kreinovich and V. Lifschitz and M. Gelfond",
  booktitle = "{Logic programming, non-monotonic reasoning and reasoning about actions: Symposium --- November 1995, El Paso, TX}",
  title = "{Logic programming, non-monotonic reasoning and reasoning about actions: Symposium --- November 1995, El Paso, TX}",
  volume = "21(2)",
  publisher = "Baltzer Science",
  address = "Basel, Switzerland",
  pages = "????",
  year = "1997",
  ISSN = "1012-2443",
  bibdate = "Thu Mar 12 11:28:58 MST 1998",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  series = "Annals of Mathematics and Artificial Intelligence",
  acknowledgement = ack-nhfb,
}

@Proceedings{Kuchlin:1997:PPS,
  editor = "Wolfgang W. K{\"u}chlin",
  booktitle = "{ISSAC 97: July 21--23, 1997, Maui, Hawaii, USA: proceedings of the 1997 International Symposium on Symbolic and Algebraic Computation}",
  title = "{ISSAC 97: July 21--23, 1997, Maui, Hawaii, USA: proceedings of the 1997 International Symposium on Symbolic and Algebraic Computation}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "xxii + 414",
  year = "1997",
  ISBN = "0-89791-875-4",
  ISBN-13 = "978-0-89791-875-6",
  LCCN = "QA76.95",
  bibdate = "Sat Mar 23 12:41:32 2002",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  URL = "http://www.acm.org/pubs/contents/proceedings/issac/258726/",
  acknowledgement = ack-nhfb,
}

@Proceedings{Lengauer:1997:EPP,
  editor = "Christian Lengauer and Martin Griebl and Sergei Gorlatch",
  booktitle = "{Euro-Par'97, parallel processing: third International Euro-Par Conference, Passau, Germany, August 26--29, 1997: proceedings}",
  title = "{Euro-Par'97, parallel processing: third International Euro-Par Conference, Passau, Germany, August 26--29, 1997: proceedings}",
  volume = "1300",
  publisher = pub-SV,
  address = pub-SV:adr,
  pages = "xxx + 1380",
  year = "1997",
  ISBN = "3-540-63440-1 (paperback)",
  ISBN-13 =
"978-3-540-63440-9 (paperback)",
  LCCN = "QA76.58.I5535 1997",
  bibdate = "Mon Aug 25 10:50:15 MDT 1997",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  series = ser-LNCS,
  acknowledgement = ack-nhfb,
  keywords = "Parallel processing (Electronic computers) -- Congresses.",
}

@Proceedings{Buchberger:1998:YGB,
  editor = "Bruno Buchberger and Franz Winkler",
  booktitle = "33 years of Gr{\"o}bner bases: Gr{\"o}bner bases and applications: Conference --- February 1998, Linz, Austria",
  title = "33 years of Gr{\"o}bner bases: Gr{\"o}bner bases and applications: Conference --- February 1998, Linz, Austria",
  number = "251",
  publisher = pub-CAMBRIDGE,
  address = pub-CAMBRIDGE:adr,
  pages = "viii + 552",
  year = "1998",
  ISBN = "0-521-63298-6",
  ISBN-13 = "978-0-521-63298-0",
  LCCN = "QA251.3.G76 1998",
  bibdate = "Thu Mar 12 11:28:58 MST 1998",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  series = "London Mathematical Society Lecture Note Series",
  acknowledgement = ack-nhfb,
  sponsor = "Research Institute for Symbolic Computation.",
}

@Proceedings{Gloor:1998:IPI,
  editor = "Oliver Gloor",
  booktitle = "{ISSAC 98: Proceedings of the 1998 International Symposium on Symbolic and Algebraic Computation, August 13--15, 1998, University of Rostock, Germany}",
  title = "{ISSAC 98: Proceedings of the 1998 International Symposium on Symbolic and Algebraic Computation, August 13--15, 1998, University of Rostock, Germany}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "xxii + 327",
  year = "1998",
  ISBN = "1-58113-002-3",
  ISBN-13 = "978-1-58113-002-7",
  LCCN = "QA155.7.E4 E88 1998",
  bibdate = "Wed Sep 16 17:13:58 1998",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  acknowledgement = ack-nhfb,
}

@Proceedings{Dooley:1999:IJS,
  editor = "Sam Dooley",
  booktitle = "{ISSAC 99: July 29--31, 1999, Simon Fraser University, Vancouver, BC, Canada: proceedings of the 1999 International Symposium on Symbolic and Algebraic Computation}",
  title = "{ISSAC 99: July 29--31, 1999, Simon Fraser University, Vancouver, BC, Canada: proceedings of the 1999 International Symposium on Symbolic and Algebraic Computation}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "xxii + 311",
  year = "1999",
  ISBN = "1-58113-073-2",
  ISBN-13 = "978-1-58113-073-7",
  LCCN = "QA76.95 .I57 1999",
  bibdate = "Sat Mar 11 16:51:59 2000",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  acknowledgement = ack-nhfb,
}

@Proceedings{Traverso:2000:IAU,
  editor = "Carlo Traverso",
  booktitle = "{ISSAC 2000: 7--9 August 2000, University of St. Andrews, Scotland: proceedings of the 2000 International Symposium on Symbolic and Algebraic Computation}",
  title = "{ISSAC 2000: 7--9 August 2000, University of St. Andrews, Scotland: proceedings of the 2000 International Symposium on Symbolic and Algebraic Computation}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "viii + 309",
  year = "2000",
  ISBN = "1-58113-218-2",
  ISBN-13 = "978-1-58113-218-2",
  LCCN = "QA76.95.I59 2000",
  bibdate = "Tue Apr 17 09:12:53 2001",
  bibsource = "http://www.acm.org/pubs/contents/proceedings/series/issac/; http://www.math.utah.edu/pub/tex/bib/issac.bib",
  note = "ACM order number 505000.",
  URL = "http://www.acm.org/pubs/contents/proceedings/issac/345542/",
  acknowledgement = ack-nhfb,
}

@Proceedings{Mourrain:2001:IJU,
  editor = "Bernard Mourrain",
  booktitle = "{ISSAC 2001: July 22--25, 2001, University of Western Ontario, London, Ontario, Canada: proceedings of the 2001 International Symposium on Symbolic and Algebraic Computation}",
  title = "{ISSAC 2001: July 22--25, 2001, University of Western Ontario, London, Ontario, Canada: proceedings of the 2001 International Symposium on Symbolic and Algebraic Computation}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "xii + 352",
  year = "2001",
  ISBN = "1-58113-417-7",
  ISBN-13 = "978-1-58113-417-9",
  LCCN = "QA76.95.I59 2001",
  bibdate = "Wed May 15 14:30:19 2002",
  bibsource =
"http://www.acm.org/pubs/contents/proceedings/series/issac/; http://www.math.utah.edu/pub/tex/bib/issac.bib",
  note = "ACM order number 505010.",
  acknowledgement = ack-nhfb,
}

@Proceedings{Mora:2002:IPI,
  editor = "Teo Mora",
  booktitle = "{ISSAC 2002: Proceedings of the 2002 International Symposium on Symbolic and Algebraic Computation, July 07--10, 2002, Universit{\'e} de Lille, Lille, France}",
  title = "{ISSAC 2002: Proceedings of the 2002 International Symposium on Symbolic and Algebraic Computation, July 07--10, 2002, Universit{\'e} de Lille, Lille, France}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "xx + 276",
  year = "2002",
  ISBN = "1-58113-484-3",
  ISBN-13 = "978-1-58113-484-1",
  LCCN = "QA76.95",
  bibdate = "Fri Nov 22 16:20:31 2002",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  URL = "http://www.lifl.fr/ISSAC2002/",
  acknowledgement = ack-nhfb,
}

@Proceedings{Senda:2003:IPI,
  editor = "J. Rafael Sendra",
  booktitle = "{ISSAC 2003: Proceedings of the 2003 International Symposium on Symbolic and Algebraic Computation, August 3--6, 2003, Drexel University, Philadelphia, PA, USA}",
  title = "{ISSAC 2003: Proceedings of the 2003 International Symposium on Symbolic and Algebraic Computation, August 3--6, 2003, Drexel University, Philadelphia, PA, USA}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "x + 273",
  year = "2003",
  ISBN = "1-58113-641-2",
  ISBN-13 = "978-1-58113-641-8",
  LCCN = "QA76.95",
  bibdate = "Sat Dec 13 18:18:22 2003",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  note = "ACM order number 505030.",
  remark = "Editor surname corrected from Senda to Sendra per the ACM proceedings record; citation key left unchanged for compatibility with existing crossrefs.",
  acknowledgement = ack-nhfb,
}

@Proceedings{Gutierrez:2004:IJU,
  editor = "Jaime Gutierrez",
  booktitle = "{ISSAC 2004: July 4--7, 2004, University of Cantabria, Santander, Spain: proceedings of the 2004 International Symposium on Symbolic and Algebraic Computation}",
  title = "{ISSAC 2004: July 4--7, 2004, University of Cantabria, Santander, Spain: proceedings of the 2004 International Symposium on Symbolic and Algebraic Computation}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "xii + 328",
  year = "2004",
  ISBN = "1-58113-827-X",
  ISBN-13 = "978-1-58113-827-6",
  LCCN = "QA76.95 .I57 2004",
  bibdate = "Fri Oct 21 06:33:01 MDT 2005",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib; z3950.loc.gov:7090/Voyager",
  acknowledgement = ack-nhfb,
  meetingname = "International Symposium on Symbolic and Algebraic Computation (2004 : Santander, Spain)",
}

@Proceedings{Kauers:2005:IJB,
  editor = "Manuel Kauers",
  booktitle = "{ISSAC '05: July 24--27, 2005, Beijing, China: Proceedings of the 2005 International Symposium on Symbolic and Algebraic Computation}",
  title = "{ISSAC '05: July 24--27, 2005, Beijing, China: Proceedings of the 2005 International Symposium on Symbolic and Algebraic Computation}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "xiv + 372",
  year = "2005",
  ISBN = "1-59593-095-7",
  ISBN-13 = "978-1-59593-095-8",
  LCCN = "????",
  bibdate = "Fri Oct 21 07:01:24 2005",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  note = "ACM Order Number 505050.",
  acknowledgement = ack-nhfb,
}

@Proceedings{Trager:2006:PIS,
  editor = "Barry Trager",
  booktitle = "{Proceedings of the 2006 International Symposium on Symbolic and Algebraic Computation, Genoa, Italy July 09--12, 2006}",
  title = "{Proceedings of the 2006 International Symposium on Symbolic and Algebraic Computation, Genoa, Italy July 09--12, 2006}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "????",
  year = "2006",
  ISBN = "1-59593-276-3",
  ISBN-13 = "978-1-59593-276-1",
  LCCN = "????",
  bibdate = "Wed Aug 23 09:44:27 2006",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  note = "ACM order number 505060.",
  acknowledgement = ack-nhfb,
}

@Proceedings{Brown:2007:PIS,
  editor = "C. W.
Brown",
  booktitle = "{Proceedings of the 2007 International Symposium on Symbolic and Algebraic Computation, July 29--August 1, 2007, University of Waterloo, Waterloo, Ontario, Canada}",
  title = "{Proceedings of the 2007 International Symposium on Symbolic and Algebraic Computation, July 29--August 1, 2007, University of Waterloo, Waterloo, Ontario, Canada}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "????",
  year = "2007",
  ISBN = "1-59593-743-9 (print), 1-59593-742-0 (CD-ROM)",
  ISBN-13 = "978-1-59593-743-8 (print), 978-1-59593-742-1 (CD-ROM)",
  LCCN = "QA76.5 S98 2007",
  bibdate = "Fri Jun 20 08:53:37 2008",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/axiom.bib; http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/maple-extract.bib",
  note = "ACM order number 505070.",
  acknowledgement = ack-nhfb,
}

@Proceedings{Jeffrey:2008:PAM,
  editor = "David Jeffrey",
  booktitle = "{Proceedings of the 21st annual meeting of the International Symposium on Symbolic Computation, ISSAC 2008, July 20--23, 2008, Hagenberg, Austria}",
  title = "{Proceedings of the 21st annual meeting of the International Symposium on Symbolic Computation, ISSAC 2008, July 20--23, 2008, Hagenberg, Austria}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "x + 338",
  year = "2008",
  ISBN = "1-59593-904-0",
  ISBN-13 = "978-1-59593-904-3",
  LCCN = "????",
  bibdate = "Fri Jun 20 08:53:37 2008",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  acknowledgement = ack-nhfb,
}

@Proceedings{May:2009:PIS,
  editor = "John P. May",
  booktitle = "{Proceedings of the 2009 international symposium on Symbolic and algebraic computation, KIAS, Seoul, Korea, July 28--31, 2009}",
  title = "{Proceedings of the 2009 international symposium on Symbolic and algebraic computation, KIAS, Seoul, Korea, July 28--31, 2009}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "xi + 389",
  year = "2009",
  ISBN = "1-60558-609-9",
  ISBN-13 = "978-1-60558-609-0",
  LCCN = "????",
  bibdate = "Fri Jun 20 08:53:37 2009",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  acknowledgement = ack-nhfb,
}

@Proceedings{Watt:2010:IPI,
  editor = "Stephen M. Watt",
  booktitle = "{ISSAC 2010: Proceedings of the 2010 International Symposium on Symbolic and Algebraic Computation, July 25--28, 2010, Munich, Germany}",
  title = "{ISSAC 2010: Proceedings of the 2010 International Symposium on Symbolic and Algebraic Computation, July 25--28, 2010, Munich, Germany}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "xiv + 363",
  year = "2010",
  ISBN = "1-4503-0150-9",
  ISBN-13 = "978-1-4503-0150-3",
  LCCN = "QA76.95 .I59 2010",
  bibdate = "Fri Jun 17 08:11:01 2011",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib",
  acknowledgement = ack-nhfb,
}

@Proceedings{Schost:2011:IPI,
  editor = "{\'E}ric Schost and Ioannis Z.
Emiris",
  booktitle = "{ISSAC 2011: Proceedings of the 2011 International Symposium on Symbolic and Algebraic Computation, June 7--11, 2011, San Jose, CA, USA}",
  title = "{ISSAC 2011: Proceedings of the 2011 International Symposium on Symbolic and Algebraic Computation, June 7--11, 2011, San Jose, CA, USA}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "362 (est.)",
  year = "2011",
  ISBN = "1-4503-0675-6",
  ISBN-13 = "978-1-4503-0675-1",
  LCCN = "QA76.95 .I59 2011",
  bibdate = "Fri Mar 14 12:24:11 2014",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/elefunt.bib; http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/maple-extract.bib; http://www.math.utah.edu/pub/tex/bib/mathematica.bib",
  acknowledgement = ack-nhfb,
}

@Proceedings{vanderHoeven:2012:IPI,
  editor = "Joris van der Hoeven and Mark van Hoeij",
  booktitle = "{ISSAC 2012: Proceedings of the 2012 International Symposium on Symbolic and Algebraic Computation, July 22--25, 2012, Grenoble, France}",
  title = "{ISSAC 2012: Proceedings of the 2012 International Symposium on Symbolic and Algebraic Computation, July 22--25, 2012, Grenoble, France}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "????",
  year = "2012",
  ISBN = "1-4503-1269-1",
  ISBN-13 = "978-1-4503-1269-1",
  LCCN = "QA76.95 .I59 2012",
  bibdate = "Fri Mar 14 12:24:11 2014",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/hash.bib; http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/maple-extract.bib; http://www.math.utah.edu/pub/tex/bib/mathematica.bib",
  acknowledgement = ack-nhfb,
}

@Proceedings{Monagan:2013:IPI,
  editor = "Michael Monagan and Gene Cooperman and Mark Giesbrecht",
  booktitle = "{ISSAC 2013: Proceedings of the 2013 International Symposium on Symbolic and Algebraic Computation, June 26--29, 2013, Boston, MA, USA}",
  title = "{ISSAC 2013: Proceedings of the 2013 International Symposium on Symbolic and Algebraic Computation, June 26--29, 2013, Boston, MA, USA}",
  publisher = pub-ACM,
  address = pub-ACM:adr,
  pages = "387 (est.)",
  year = "2013",
  ISBN = "1-4503-2059-7",
  ISBN-13 = "978-1-4503-2059-7",
  LCCN = "QA76.95 .I59 2013",
  bibdate = "Fri Mar 14 12:24:11 2014",
  bibsource = "http://www.math.utah.edu/pub/tex/bib/issac.bib; http://www.math.utah.edu/pub/tex/bib/maple-extract.bib; http://www.math.utah.edu/pub/tex/bib/mathematica.bib",
  acknowledgement = ack-nhfb,
}