peter-2004.bib
@comment{{This file has been generated by bib2bib 1.99}}
@comment{{Command line: bib2bib -c 'year=2004 and not annote:"skip_html" and not annote:"unpublished"' -ob peter-2004.bib -oc peter-2004.cite peter.bib}}
@inproceedings{Baumgartner:Mediratta:ASPPlanningBidirectional:KBCS:2004,
  author    = {Baumgartner, Peter and Mediratta, Anupam},
  title     = {Improving {Stable Models} Based Planning by Bidirectional Search},
  booktitle = {International Conference on Knowledge Based Computer Systems (KBCS)},
  year      = {2004},
  address   = {Hyderabad, India},
  month     = dec,
  url       = {kbcs.pdf},
  abstract  = {Solving AI planning problems by transformation into (normal) logic
               programs and computing answer sets (stable models) has gained
               considerable interest over the last years. We investigate in this
               context a classical AI search technique, bidirectional search, where
               search is performed both from the initial facts towards the goal and
               vice versa. Our contribution is to show how bidirectional search can
               be realized in the logic programming/answer set paradigm to
               planning. This seems not having been investigated so far. We report
               on practical experiments on planning problems from an AIPS
               competition and show how our approach helps speeding up the planning
               process. We perceive our contribution mainly as a {\em technique\/} that
               is compatible with and complementary to existing extensions and
               improvements, rather than as a concrete planning system.}
}
@incollection{Baumgartner:Furbach:LivingBooksStrangeThings:JS60:2004,
  author    = {Baumgartner, Peter and Furbach, Ulrich},
  title     = {Living Books, Automated Deduction and other Strange Things},
  booktitle = {Mechanizing Mathematical Reasoning:
               Techniques, Tools and Applications --
               Essays in honour of J{\"o}rg H. Siekmann},
  editor    = {Hutter, Dieter and Stephan, Werner},
  series    = {LNCS},
  volume    = {2605},
  pages     = {255--274},
  publisher = {Springer-Verlag},
  year      = {2004},
  url       = {http://www.dfki.de/~hutter/js60/}
}
@article{Baumgartner:EtAl:LivingBook:JAR:2004,
  author    = {Baumgartner, Peter and Furbach, Ulrich and
               Gross-Hardt, Margret and Sinner, Alex},
  title     = {{Living Book} -- Deduction, Slicing, and Interaction},
  journal   = {Journal of Automated Reasoning},
  year      = {2004},
  publisher = {Kluwer Academic Publishers},
  volume    = {32},
  number    = {3},
  pages     = {259--286},
  era       = {A},
  abstract  = {The Living Book is a system for the management of
               personalized and scenario specific teaching material. The
               main goal of the system is to support the active,
               explorative and self-determined learning in lectures,
               tutorials and self study. Living Book includes a course on
               ``Logic for Computer Scientists'' with a uniform access to
               various tools like theorem provers and an interactive
               tableau editor. It is routinely used within teaching
               undergraduate courses at our university. \par This paper
               describes both, the Living Book together with its use of
               theorem proving technology as a core component in the
               knowledge management system (KMS), and the use of this new
               concept in academic teaching. The KMS provides a {\em
               scenario management\/} component where teachers may
               describe those parts of given documents that are relevant
               in order to achieve a certain learning goal. The task of
               the KMS is to assemble new documents from a database of
               elementary units called ``slices'' (definitions, theorems,
               and so on) in a scenario-based way (like ``I want to
               prepare for an exam and need to learn about resolution'').
               \par The computation of such assemblies is carried out by a
               model-generating theorem prover for first-order logic with
               a default negation principle. Its input consists of meta
               data that describe the dependencies between different
               slices, and logic-programming style rules that describe the
               scenario-specific composition of slices. Additionally,
               users may assess what units they know or don't know. This
               information is stored in a user model, which is taken into
               account to compute a model that specifies the assembly of a
               personalized document. \par This paper introduces the
               e-learning context we are faced with, motivates our choice
               of logic, sketches the newly developed calculus used in the
               KMS. Finally, the application and evaluation of Living
               Books is presented.}
}
@inproceedings{Baumgartner:Burchardt:LPFrameNet:JELIA:2004,
  author    = {Baumgartner, Peter and Burchardt, Aljoscha},
  title     = {{Logic Programming} Infrastructure for Inferences on {FrameNet}},
  booktitle = {Logics in Artificial Intelligence, Ninth European
               Conference, JELIA'04},
  editor    = {Alferes, Jos{\'e} and Leite, Jo{\~a}o},
  series    = {Lecture Notes in Artificial Intelligence},
  volume    = {3229},
  pages     = {591--603},
  year      = {2004},
  era       = {A},
  publisher = {Springer Verlag, Berlin, Heidelberg, New-York},
  url       = {jelia2004.pdf},
  abstract  = {The growing size of electronically available text corpora
               like companies' intranets or the WWW has made
               \emph{information access} a hot topic within Computational
               Linguistics. Despite the success of statistical or keyword
               based methods, deeper Knowledge Representation (KR)
               techniques along with ``inference'' are often mentioned as
               mandatory, e.g.\ within the Semantic Web context, to enable
               e.g.\ better query answering based on ``semantical''
               information. In this paper we try to contribute to the open
               question how to operationalize semantic information on a
               larger scale. As a basis we take the \emph{frame}
               structures of the Berkeley FrameNet~II project, which is a
               structured dictionary to explain the meaning of words from
               a lexicographic perspective. Our main contribution is a
               transformation of the FrameNet~II frames into the
               \emph{answer set programming paradigm} of logic
               programming. \par Because a number of different reasoning
               tasks are subsumed under ``inference'' in the context of
               natural language processing, we emphasize the flexibility
               of our transformation. Together with methods for automatic
               annotation of text documents with frame semantics which are
               currently developed at various sites, we arrive at an
               infrastructure that supports experimentation with semantic
               information access as is currently demanded for.}
}
@inproceedings{Baumgartner:etal:ModelBasedSchemaReasoning:KI:2004,
  author    = {Baumgartner, Peter and Furbach, Ulrich and
               Gross-Hardt, Margret and Kleemann, Thomas},
  title     = {Model Based Deduction for Database Schema Reasoning},
  booktitle = {KI 2004: Advances in Artificial Intelligence},
  editor    = {Biundo, Susanne and Fr{\"u}hwirth, Thom and Palm, G{\"u}nther},
  series    = {Lecture Notes in Computer Science},
  volume    = {3238},
  pages     = {168--182},
  year      = {2004},
  publisher = {Springer Verlag, Berlin, Heidelberg, New-York},
  url       = {ki2004.pdf},
  abstract  = {We aim to demonstrate that automated deduction techniques,
               in particular those following the model computation
               paradigm, are very well suited for database schema/query
               reasoning. Specifically, we present an approach to compute
               completed paths for database or XPath queries. The database
               schema and a query are transformed to disjunctive logic
               programs with default negation, using a description logic
               as an intermediate language. Our underlying deduction
               system, {\em KRHyper\/}, then detects if a query is
               satisfiable or not. In case of a satisfiable query, all
               completed paths -- those that fulfill all given constraints
               -- are returned as part of the computed models. \par The
               purpose of computing completed paths is to reduce the
               workload on a query processor. Without the path completion,
               a usual XPath query processor would search the whole
               database for solutions to the query, which need not be the
               case when using completed paths instead. \par We understand
               this paper as a first step, that covers a basic
               schema/query reasoning task by model-based deduction. Due
               to the underlying expressive logic formalism we expect our
               approach to easily adapt to more sophisticated problem
               settings, like type hierarchies as they evolve within the
               XML world.}
}
@inproceedings{Baumgartner:etal:Darwin:ESFOR:2004,
  author    = {Baumgartner, Peter and Fuchs, Alexander and Tinelli, Cesare},
  title     = {{Darwin}: A Theorem Prover for the {Model Evolution Calculus}},
  booktitle = {Proceedings of the 1st Workshop on Empirically
               Successful First Order Reasoning (ESFOR'04), Cork,
               Ireland, 2004},
  editor    = {Schulz, Stephan and Sutcliffe, Geoff and Tammet, Tanel},
  series    = {Electronic Notes in Theoretical Computer Science},
  year      = {2004},
  publisher = {Elsevier},
  url       = {darwin.pdf},
  abstract  = {Darwin is the first implementation of the Model Evolution
               Calculus by Baumgartner and Tinelli. The Model Evolution
               Calculus lifts the DPLL procedure to first-order logic.
               Darwin is meant to be a fast and clean implementation of
               the calculus, showing its effectiveness and providing a
               base for further improvements and extensions. Based on a
               brief summary of the Model Evolution Calculus, we describe
               in the main part of the paper Darwin's proof procedure and
               its data structures and algorithms, discussing the main
               design decisions and features that influence Darwin's
               performance. We also report on practical experiments
               carried out with problems from the CADE-18 and CADE-19
               system competitions, as well as on results on parts of the
               TPTP problem library.}
}
@article{Baumgartner:etal:in2math:swtechnik:2004,
  author  = {Baumgartner, Peter and Grabowski, Barbara and Oevel, Walter
             and Melis, Erica},
  title   = {{In2Math} -- {Interaktive} {Mathematik}- und
             {Informatikgrundausbildung}},
  journal = {Softwaretechnik-Trends},
  year    = {2004},
  volume  = {24},
  number  = {1},
  pages   = {36--45},
  url     = {http://www.uni-koblenz.de/~peter/Publications/in2math.pdf}
}