Compare commits

...

38 commits

Author SHA1 Message Date
Jeff Epler
4f29a48653 rtai_math: finite was defined, not exported 2014-09-19 19:59:56 -05:00
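The fix here concerns kernel symbol visibility rather than the math itself: a function defined in one kernel module is only usable from other modules if it is also exported. A minimal sketch of the pattern, assuming a finite() implementation inside a math module (not the actual rtai_math code):

/* Illustrative sketch only, not the actual rtai_math change. */
#include <linux/module.h>

int finite(double x)
{
        /* ... the real classification logic lives in rtai_math ... */
        return 1;
}
/* Without this export, modules that call finite() fail to resolve the symbol. */
EXPORT_SYMBOL(finite);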
Jeff Epler
ce81999e66 rtai_math: get rid of use of userspace headers in kernel modules 2014-09-19 19:59:50 -05:00
Alec Ari
fa71c0c76d Update README.md
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-09-14 02:53:12 -05:00
Alec Ari
a29b0bee30 Fix 32-bit build
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-09-14 02:52:06 -05:00
Alec Ari
3b1a9dd0f2 Trim configs
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-09-11 23:01:35 -05:00
Alec Ari
08618bbf48 Add big generic kernel configs
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-09-10 23:16:56 -05:00
Alec Ari
c0282fe4ea Optimize FORTIFY_SOURCE
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-28 12:34:32 -05:00
Alec Ari
47a1f7f74b Bump 3.10 kernel support
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-27 13:59:21 -05:00
Alec Ari
2f749c34ce Fix spurious APIC interrupts
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-26 08:41:47 -05:00
Alec Ari
d4cf50bee7 Trim it down
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-26 08:29:44 -05:00
Alec Ari
2df23cc0de Fix latency bug for my board (trivial)
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-26 08:16:07 -05:00
Alec Ari
037c704ccc Remove bits and tbx
Not needed for LinuxCNC

Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-26 07:39:47 -05:00
Alec Ari
783893c929 Remove dead code in xn.h
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-26 07:39:47 -05:00
Alec Ari
2fb9c58762 Remove last bit of RTDM
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-26 07:39:46 -05:00
Alec Ari
3086517cd2 Too old to matter
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-23 12:04:35 -05:00
Alec Ari
d897f9c2d7 Silences some warnings, not sure why
If problems arise I will revert this

Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-23 11:57:53 -05:00
Alec Ari
9839efa96b Force FPU, clean-up FPU headers
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-23 08:06:36 -05:00
Alec Ari
edb6c1a7ce Modify kernel kconfig for ipipe
Fixes kernel panic for timer IO-APIC bug

Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-23 07:43:05 -05:00
Alec Ari
ea85023591 Merge branch 'master' of github.com:NTULINUX/RTAI 2014-08-23 04:18:33 -05:00
Alec Ari
b39e77aaf7 Merge pull request #2 from jepler/remove-nonfree-code-harder
Remove cut & paste copies of non-free code
2014-08-23 08:50:18 -05:00
Jeff Epler
de529b0c0d Remove cut & paste copies of non-free code
.. rather than using this implementation of isinf with dubious
provenance, we can depend on __builtin_isinf, which compiles to
an inline instruction sequence on i386, x86_64, and armhf.
2014-08-23 08:40:11 -05:00
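A minimal sketch of the substitution this commit describes, with illustrative names rather than the actual RTAI sources:

/* Instead of carrying a bit-level isinf() copied from a non-free libm,
 * lean on the compiler builtin, which GCC expands to an inline instruction
 * sequence on i386, x86_64 and armhf, so no library code needs to be copied. */
static inline int rt_isinf(double x)
{
        return __builtin_isinf(x);
}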
Alec Ari
26da62c969 Re-work RTDM includes
Remove RTDM as well

Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-23 04:17:44 -05:00
Alec Ari
5c10841066 Whitespace fixes
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 21:02:41 -05:00
Alec Ari
b01be3d331 Clean up math, force IEEE
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 20:24:32 -05:00
Alec Ari
e2bb11fcf5 Mention SSE2 as requirement
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 18:00:29 -05:00
Alec Ari
c98c84fe9b Modify my own license
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 17:46:40 -05:00
Alec Ari
d2deb6292b Merge branch 'remove-nonfree-code' of github.com:jepler/RTAI
Signed-off-by: Alec Ari <neotheuser@ymail.com>

Conflicts:
	base/math/GNUmakefile.am
2014-08-22 17:44:06 -05:00
Alec Ari
550aeaed4b More clean-up, rework more defaults
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 05:43:27 -05:00
Alec Ari
96857bcbae Add m4 dir
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 05:24:44 -05:00
Alec Ari
7960e1643b Add m4 dir to .gitignore
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 05:16:21 -05:00
Alec Ari
3e171e8003 Remove m4 files in dir
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 05:13:32 -05:00
Alec Ari
0adf90be69 Also remove rtai_config* upon distclean
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 05:11:43 -05:00
Alec Ari
3e16737bf2 Remove GNUmakefile.in upon distclean
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 05:06:25 -05:00
Alec Ari
42ab605bfa Minor cleanup
Fixes distclean and forces MATH and C99

Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 05:02:39 -05:00
Alec Ari
24815b03e9 Remove redundant CFLAGS
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 04:41:16 -05:00
Alec Ari
659fd69c1b Rework Kconfig defaults
Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-22 04:40:37 -05:00
Alec Ari
fbb1f385cd Add fmix / fmax functions
Required for LinuxCNC on 3.14+

Signed-off-by: Alec Ari <neotheuser@ymail.com>
2014-08-20 07:12:14 -05:00
Jeff Epler
04c0b06102 Remove files that do not have a distributable license
These files do not carry a licences that makes them distributable.
However, all of them appear to be either unused or not required by
linuxcnc, based on my analysis at
    http://mid.gmane.org/20131101024943.GA31516%40unpythonic.net
2014-08-20 07:04:33 -05:00
232 changed files with 11304 additions and 29457 deletions

.gitignore

@@ -25,6 +25,7 @@ configure
 aclocal.m4
 ltversion.m4
 bin/
+m4/*
 doc/guide/*.html
 addons/drivers/16550A/FORCE


@@ -1,12 +1,6 @@
-OPTDIRS =
-OPTDIRS += addons
-SUBDIRS = base $(OPTDIRS)
-DIST_SUBDIRS = \
-base \
-addons
+SUBDIRS = base
+DIST_SUBDIRS = base
 EXTRA_DIST = \
 README.INSTALL \
@@ -76,7 +70,8 @@ clean-local:
 distclean-local:
 $(MAKE) -C $(srcdir)/base/config/kconfig \
 -f Makefile.kconfig distclean srctree=$(srcdir)
-find . -name autom4te.cache | xargs rm -fr
+find . -name GNUmakefile.in | xargs rm -fr
+rm -rf rtai_config* autom4te.cache
 else
 clean-local:
 rm -f .cfchanged
@@ -98,9 +93,7 @@ install-exec-local: devices
 dist-hook:
 $(MAKE) -C $(distdir)/base/config/kconfig \
--f Makefile.kconfig distclean srctree=$(distdir)
-rm -fr `find $(distdir) -name CVS`
-test -e $(srcdir)/doc || rm -fr $(distdir)/doc
+-f Makefile.kconfig distclean srctree=$(srcdir)
 dev devices:
 @if test -r $(DESTDIR)/etc/udev/udev.rules ; then \


@@ -1,333 +1,39 @@
-Installing RTAI-3.x
-===================
-0. RTAI 3.6
-================================
-RTAI uses only the leanest and fastest RTAI+Linux combination now, i.e. it
-dispatches real time interrupts immediately. Moreover within such a scheme
-i386 has the possibility of completely avoiding the dispatching of critical
-internal timers and interprocessor scheduling interrupts by vectoring them
-directly to their handlers. Such an option strongly enhances performances
-on low end machines and comes by default. You can disable it while configuring
-RTAI though. See the related configuration helper.
-What above requires you to patch Linux just with patches found wihin this
-distribution.
-Notice also that this release is for use with:
-- i386,
-- x86_64,
-- PPC.
-On recent distributions, managing udev/sysfs, devices created by following
-this installation procedure will disappear at each machine halting. Then they
-will be available nomore at the next reboot. RTAI should manage udev/sysfs
-support but in case something weird will impede it you can solve your problem
-quickly by restoring RTAI devices before running your application. For that
-the following short script containing:
------------------------------- cut here -------------------------------------
-if test \! -c /dev/rtai_shm; then
-mknod -m 666 /dev/rtai_shm c 10 254
-fi
-for n in `seq 0 9`; do
-f=/dev/rtf$n
-if test \! -c $f; then
-mknod -m 666 $f c 150 $n
-fi
-done
------------------------------- cut here -------------------------------------
-should suffice.
-1. How to build
-===============
-1.1 Standard procedure
-----------------------
-The RTAI build system is a merge of Linux's Kconfig with
-autoconf/automake/libtool. Therefore, you can either build RTAI:
-1.1.1 Interactive configuration
--------------------------------
-1) Into the source tree like with 24.1.x (your_source_dir ==
-your_build_dir). Just run either:
-$ make xconfig # (Qt-based)
-$ make gconfig # (GTK-based)
-$ make menuconfig (dialog-based as in 24.1.x)
-Save your configuration, wait for the shell prompt to come back after
-the configuration script has fully finished, then run "make".
-2) Outside the source tree. From your fresh build directory,
-either run:
-$ make -f $source_tree/makefile srctree=$source_tree xconfig
-$ make -f $source_tree/makefile srctree=$source_tree gconfig
-$ make -f $source_tree/makefile srctree=$source_tree menuconfig
-If you are using a version of make >= 3.80, then you can even get rid
-of the "srctree=$source_tree" setting. The makefile will infere its
-value properly.
-1.1.2 Non-interactive configuration
------------------------------------
-Since RTAI 3.x has autoconf inside, people needing automatic
-non-interactive configuration can directly use the provided GNU
-configure script for this purpose. The available configuration
-switches can be listed by running ./configure --help. The
-RTAI-specific switches are:
---enable-trace Enable trace support
---enable-math Enable math support
---enable-bits Enable bits IPC support
---enable-fifos Enable fifo IPC support
---enable-netrpc Enable netrpc support
---enable-netrpc-rtnet Enable use of RTNet
---enable-sem Enable semaphore support
---enable-msg Enable messaging support
---enable-mbx Enable mailbox support
---enable-tbx Enable typed mailbox support
---enable-mq Enable POSIX-like message queue support
---enable-shm Enable shared memory support
---enable-malloc Enable real-time malloc support
---enable-tasklets Enable tasklets support
---enable-usi Enable user-space interrupt support
---enable-watchdog Enable watchdog support
---enable-leds Enable leds-based debugging support
---enable-sched-lxrt Enable scheduler support without RTAI own ktasks
---enable-ktasks-sched-lxrt Enable scheduler support with RTAI own ktasks
---enable-sched-lock-isr Enable scheduler lock in ISRs
---enable-sched-8254-latency= Set 8254 tuning latency
---enable-sched-apic-latency= Set APIC tuning latency
---enable-sched-lxrt-numslots= Set number of LXRT slots
---enable-cplusplus Build in-kernel C++ support
---enable-comedi-lxrt Enable comedi/LXRT support
---enable-serial Build real-time serial driver
---enable-testsuite Build RTAI testsuite
---enable-rtailab Build RTAI-Lab
---enable-fpu Enable FPU support
---enable-math-c99 Enable math C99 support
---enable-malloc-vmalloc Enable vmalloc support in malloc
---enable-malloc-heap-size Set size of real-time malloc heap
---enable-cpus Enable CPUs
---enable-dox-doc Build Doxygen documentation
---enable-dbx Build Docbook XML documentation.
---enable-dbx-network Try to access Docbook DTD and XSL stylesheets through
-network.
---enable-latex-doc Build LaTeX documentation
---enable-verbose-latex Uses LaTeX non-stop mode
---enable-compat Enable compatibility mode
---enable-module-debug Enable debug information in kernel modules
---enable-user-debug Enable debug information in user-space programs
-Some configuration targets in base/ can either produce a module,
-or be statically linked to the RTAI schedulers. Either pass "m" for
-the modular build to their respective --enable-xxx switch, or "y" for
-selecting the built-in mode.
-1.1.3 Recycling a configuration file
-------------------------------------
-You can also recycle an existing .rtai_config file from a previous
-build by running:
-$ cp -rp $old_builddir/.rtai_config \
-$new_builddir/.rtai_config
-$ cd $new_builddir
-$ make -f $source_tree/makefile srctree=$source_tree oldconfig
-1.1.4 Selecting alternate compilers
------------------------------------
-Compiler selection must be done at _configuration_ time. One only
-needs to pass the right values for the standard environment variables
-CC and CXX, respectively for compiling C and C++ source files. In any
-case, using a GCC toolchain is mandatory. When unspecified, these
-variables's values respectively default to "gcc" and "g++".
-WARNING: this selection is not aimed at toggling the cross-compilation
-mode on. In order to do so, please refer to 1.2.
-Examples:
-# Start configuring using the Qt-based GUI with alternate compilers
-$ make xconfig CC=/my/favourite/c/compiler CXX=/my/favourite/c++/compiler
-# Reconfiguring a previous build tree, changing the compilers on-the-fly.
-$ make reconfig CC=/my/favourite/c/compiler CXX=/my/favourite/c++/compiler
-# Rebuild all [and optionally install]
-$ make [all [install]]
-CAVEAT: Since the autoconf-based engine needs to analyze the
-compilers's features and crams the CC and CXX values into the
-Makefiles it produces, passing CC and CXX variables at build time
-simply does _not_ work for the purpose of selecting alternate compiler
-toolchains. Again, you need to let the configuration engine know about
-these new settings as explained above.
-1.2 Installing the software
----------------------------
-When the standard (or cross-) compilation has finished:
-$ cd $builddir
-$ make install
-Everything needed to use the RTAI distribution will be copied there
-out of the build tree. From now on, you should be able to refer to the
-installation directory as the root of a complete standalone binary
-RTAI distribution.
-One may also choose to install the RTAI programs under a temporary
-staging tree by passing the standard DESTDIR variable to "make" while
-installing. e.g.
-$ make install DESTDIR=/mnt/target
-will create a standard RTAI hierarchy under /mnt/target, keeping the
-original prefix information unmodified. If the installation directory
-selected at configuration time was "/usr/realtime", then the command
-above will put the RTAI binary distribution under
-/mnt/target/usr/realtime.
-WARNING: running "make install" is required to run several standard
-RTAI programs correctly. RTAI 3.x enforces the actual split between
-the source distribution tree, the build tree where RTAI is going to be
-compiled, and the final installation directory where RTAI programs can
-be run eventually. In any case, you should only rely on the
-installation directory contents to run RTAI programs.
-NOTE: Do not pay attention to the "*** Warning" messages appearing on
-module compilation output. They are harmless and will be fixed later.
-1.3 Compiling parts of the tree
--------------------------------
-RTAI developers may want to recompile parts of the tree from times to
-times. This automake-based build system allows it: just go to the
-proper directory level, then issue "make", as usual. This process will
-recurse as needed.
-The "makefile" (small caps) found in the root source directory is used
-to bootstrap the build system when it is about to be configured for
-the first time. After the first configuration has been successful, one
-just need to run "make xconfig|gconfig|menuconfig|..." as usual.
-1.4 Changing the configuration
-------------------------------
-Each time you want to change your configuration, just run "make
-xconfig|gconfig|menuconfig" in your build dir, then "make" (or "make
-all").
-If you only want to rerun the "configure" script using the current
-RTAI configuration, just run "make reconfig", there is no need to
-restart any GUI for that.
-When invoked for the first time in an empty build directory, the
-default "make" goal is "menuconfig" in a regular terminal, or
-"xconfig" in an emacs-term.
-1.5 Modifying the autotool template files
------------------------------------------
-If you have to change some template files used by any of the autotools
-(i.e. Makefile.am, configure.in, acinclude.m4), then you will need the
-following pack to rebuild the derived files:
-o autoconf 2.59
-o automake 1.9.2
-o aclocal 1.9.2
-o libtool 1.5.8
-1.6 Using the integrated calibration tool
------------------------------------------
-RTAI 3.x comes with a brand new calibration tool which should help you
-determining if your hardware platform is up to the hard real-time
-duties. You can find this tool under $installdir/calibration. Here is
-the output of this tool's help message for x86-based platforms:
-$ ./calibration --help
-OPTIONS:
--h, --help
-print usage
--r, --r8254
-calibrate 8254 oneshot programming type
--k, --kernel
-oneshot latency calibrated for scheduling kernel space tasks
--u, --user
-oneshot latency calibrated for scheduling user space tasks
--p <period (us)>, --period <period (us)>
-the period of the underlying hard real time task/intr, default 100 (us)
--t <duration (s)>, --time <duration (s)>
-set the duration of the requested calibration, default 5 (s)
--c, --cpufreq
-calibrate cpu frequency
--a, --apic
-calibrate apic frequency
--b, --both
-calibrate both apic and cpu frequency
--i, --interrupt
-check worst case interrupt locking/contention on your PC
--s<y/n>, --scope<y/n>
-toggle parport bit to monitor scheduling on a scope, default y(es)
-2. Bootstrapping with vulcano in 7 steps
-========================================
->1. patch and build a vanilla Linux kernel tree with the RTAI support as
-usual. Patches for x86 are available from
-vulcano/base/arch/i386/patches/. Apply only one of them
-that matches the Linux kernel version, like this:
-$ cd $linux-src-dir
-$ patch -p1 < base/arch/$arch/patches/patchfile
->2. $ mkdir $builddir && cd $builddir
->3. $ make -f ../vulcano/makefile srctree=../vulcano
->4. a GUI should pop up, allowing you to configure RTAI:
-o default settings should be ok for most platforms
-o in the "General" section, set your site values for the RTAI
-installation directory (defaults to /usr/realtime) and
-Linux kernel tree (defaults to /usr/src/linux).
-o save and exit.
---
-At this point, you should see the typical output of a GNU configure
-script. Your RTAI settings are being used to setup the
-autoconf/automake-based build system.
---
->5. $ make install
-RTAI will be compiled then installed under the directory you specified
-at configuration time (ensure that your shell can write to the
-destination directory).
-Remember to add $installdir/bin to your shell PATH variable, where
-$installdir is your site value for the RTAI installation directory.
---
->6. $ cd $installdir/testsuite/kern/latency
->7. $ ./run
-If "sudo" is installed on your box, the application loader script
-(rtai-load) will attempt to use it for running privileged commands
-(e.g. insmod, rmmod et al.) If sudo is not available, just "su"
-manually before running the run script. You should then see the
-typical output of the latency calibration program running in kernel
-space. Hit ^C to stop it.
---
-<rpm@xenomai.org>
-12/5/2004
+=== Installing RTAI ===
+
+Commands marked with # specify root, commands marked with $ are user commands.
+
+1.) Download the RTAI source
+
+$ cd ~/ && git clone https://github.com/NTULINUX/RTAI.git
+$ sudo cp -v base/arch/x86/patches/hal-linux-3.14.17-x86-1.patch /usr/src/
+
+2.) Patch kernel with IPIPE, in this case we use 3.14.17
+
+# cd /usr/src
+# rm -rf linux
+# wget https://www.kernel.org/pub/linux/kernel/v3.x/linux-3.14.17.tar.xz
+# cd linux-3.14.17
+# patch -p1 < ../hal-linux-3.14.17-x86-1.patch
+# make menuconfig
+
+Configure to your needs
+
+# make
+# make modules_install
+
+3.) Update grub
+
+Refer to your distribution documentation to do this
+
+4.) Compile, configure and install RTAI (MUST BE BOOTED INTO YOUR NEW KERNEL)
+
+$ cd ~/RTAI
+$ ./autogen.sh
+$ make menuconfig
+
+Configure to your needs
+
+$ make
+$ sudo make install
+
+5.) Go get LinuxCNC and compile that.


@@ -1 +1,7 @@
 RTAI for LinuxCNC
+
+Tested and confirmed working on AMD64 and i686 platforms.
+
+SSE, SSE2, and FPU support required.
+
+For more information, visit https://gcc.gnu.org/wiki/FloatingPointMath
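A quick user-space way to verify the CPU requirements listed above, assuming GCC 4.8+ on x86 (this check is not part of the repository):

/* Sketch: report whether the CPU advertises the SSE/SSE2 features
 * the README requires, using GCC's x86 feature-detection builtins. */
#include <stdio.h>

int main(void)
{
        __builtin_cpu_init();
        printf("SSE:  %s\n", __builtin_cpu_supports("sse")  ? "yes" : "no");
        printf("SSE2: %s\n", __builtin_cpu_supports("sse2") ? "yes" : "no");
        return 0;
}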


@@ -1,5 +0,0 @@
OPTDIRS =
OPTDIRS += rtdm
SUBDIRS = $(OPTDIRS)


@@ -1,81 +0,0 @@
moduledir = @RTAI_MODULE_DIR@
includedir = $(prefix)/include/rtdm
modext = @RTAI_MODULE_EXT@
librtdm_a_SOURCES = \
core.c \
device.c \
drvlib.c \
module.c \
proc.c \
rtai_taskq.c \
select.c \
vfile.c
include_HEADERS = \
rtdm.h \
rtdm_driver.h \
rtserial.h \
xn.h \
rtai_taskq.h \
select.h \
vfile.h
if CONFIG_KBUILD
rtai_rtdm$(modext): @RTAI_KBUILD_ENV@
rtai_rtdm$(modext): $(librtdm_a_SOURCES) FORCE
@RTAI_KBUILD_TOP@ \
@RTAI_KBUILD_CMD@ rtai_extradef="@RTAI_KMOD_CFLAGS@" \
@RTAI_KBUILD_BOTTOM@
clean-local:
@RTAI_KBUILD_CLEAN@
rm -f FORCE
distclean-local:
@RTAI_KBUILD_DISTCLEAN@
else
noinst_LIBRARIES = librtdm.a
librtdm_a_AR = ar cru
AM_CPPFLAGS = \
@RTAI_KMOD_CFLAGS@ \
-I$(top_srcdir)/base/include \
-I$(top_srcdir)/addons \
-I../../base/include \
-I../.. \
-I..
rtai_rtdm.o: librtdm.a
$(LD) --whole-archive $< -r -o $@
endif
all-local: rtai_rtdm$(modext)
if CONFIG_RTAI_OLD_FASHIONED_BUILD
$(mkinstalldirs) $(top_srcdir)/modules
$(INSTALL_DATA) $^ $(top_srcdir)/modules
endif
install-exec-local: rtai_rtdm$(modext)
$(mkinstalldirs) $(DESTDIR)$(moduledir) $(DESTDIR)$(includedir)
$(INSTALL_DATA) $< $(DESTDIR)$(moduledir)
uninstall-local:
$(RM) $(DESTDIR)$(moduledir)/rtai_rtdm$(modext)
.PHONY: FORCE
EXTRA_DIST = $(libmodule_SRC) Makefile.kbuild \
internal.h
# Always build if CONFIG_RTAI_RTDM is enabled
if CONFIG_RTAI_RTDM
OPTDIRS = lib
endif
SUBDIRS = $(OPTDIRS)


@@ -1,12 +0,0 @@
EXTRA_CFLAGS += -I$(rtai_srctree)/base/include \
-I$(rtai_srctree)/addons/rtdm \
-I$(rtai_srcdir)/.. \
-I$(src)/../../base/include \
-I$(src) \
-I$(src)/../.. \
$(rtai_extradef)
obj-m += $(rtai_target).o
$(rtai_target)-objs := $(rtai_objs)

File diff suppressed because it is too large


@@ -1,522 +0,0 @@
/**
* @file
* Real-Time Driver Model for RTAI, device management
*
* @note Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
* @note Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
*
* with adaptions for RTAI by Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* RTAI is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* RTAI is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with RTAI; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*!
* @addtogroup driverapi
* @{
*/
#include <linux/module.h>
#include <linux/delay.h>
#include "rtdm/internal.h"
#define SET_DEFAULT_OP(device, operation) \
(device).operation##_rt = (void *)rtdm_no_support; \
(device).operation##_nrt = (void *)rtdm_no_support
#define SET_DEFAULT_OP_IF_NULL(device, operation) \
if (!(device).operation##_rt) \
(device).operation##_rt = (void *)rtdm_no_support; \
if (!(device).operation##_nrt) \
(device).operation##_nrt = (void *)rtdm_no_support
#define ANY_HANDLER(device, operation) \
((device).operation##_rt || (device).operation##_nrt)
unsigned int devname_hashtab_size = DEF_DEVNAME_HASHTAB_SIZE;
unsigned int protocol_hashtab_size = DEF_PROTO_HASHTAB_SIZE;
module_param(devname_hashtab_size, uint, 0400);
module_param(protocol_hashtab_size, uint, 0400);
MODULE_PARM_DESC(devname_hashtab_size,
"Size of hash table for named devices (must be power of 2)");
MODULE_PARM_DESC(protocol_hashtab_size,
"Size of hash table for protocol devices "
"(must be power of 2)");
struct list_head *rtdm_named_devices; /* hash table */
struct list_head *rtdm_protocol_devices; /* hash table */
static int name_hashkey_mask;
static int proto_hashkey_mask;
int rtdm_apc;
EXPORT_SYMBOL(rtdm_apc);
struct semaphore nrt_dev_lock;
DEFINE_XNLOCK(rt_dev_lock);
#ifndef MODULE
int rtdm_initialised = 0;
#endif /* !MODULE */
int rtdm_no_support(void)
{
return -ENOSYS;
}
int rtdm_select_bind_no_support(struct rtdm_dev_context *context,
struct xnselector *selector,
unsigned type,
unsigned index)
{
return -EBADF;
}
static inline int get_name_hash(const char *str, int limit, int hashkey_mask)
{
int hash = 0;
while (*str != 0) {
hash += *str++;
if (--limit == 0)
break;
}
return hash & hashkey_mask;
}
static inline int get_proto_hash(int protocol_family, int socket_type)
{
return protocol_family & proto_hashkey_mask;
}
static inline void rtdm_reference_device(struct rtdm_device *device)
{
atomic_inc(&device->reserved.refcount);
}
struct rtdm_device *get_named_device(const char *name)
{
struct list_head *entry;
struct rtdm_device *device;
int hashkey;
spl_t s;
hashkey = get_name_hash(name, RTDM_MAX_DEVNAME_LEN, name_hashkey_mask);
xnlock_get_irqsave(&rt_dev_lock, s);
list_for_each(entry, &rtdm_named_devices[hashkey]) {
device = list_entry(entry, struct rtdm_device, reserved.entry);
if (strcmp(name, device->device_name) == 0) {
rtdm_reference_device(device);
xnlock_put_irqrestore(&rt_dev_lock, s);
return device;
}
}
xnlock_put_irqrestore(&rt_dev_lock, s);
return NULL;
}
struct rtdm_device *get_protocol_device(int protocol_family, int socket_type)
{
struct list_head *entry;
struct rtdm_device *device;
int hashkey;
spl_t s;
hashkey = get_proto_hash(protocol_family, socket_type);
xnlock_get_irqsave(&rt_dev_lock, s);
list_for_each(entry, &rtdm_protocol_devices[hashkey]) {
device = list_entry(entry, struct rtdm_device, reserved.entry);
if ((device->protocol_family == protocol_family) &&
(device->socket_type == socket_type)) {
rtdm_reference_device(device);
xnlock_put_irqrestore(&rt_dev_lock, s);
return device;
}
}
xnlock_put_irqrestore(&rt_dev_lock, s);
return NULL;
}
/*!
* @ingroup driverapi
* @defgroup devregister Device Registration Services
* @{
*/
/**
* @brief Register a RTDM device
*
* @param[in] device Pointer to structure describing the new device.
*
* @return 0 is returned upon success. Otherwise:
*
* - -EINVAL is returned if the device structure contains invalid entries.
* Check kernel log in this case.
*
* - -ENOMEM is returned if the context for an exclusive device cannot be
* allocated.
*
* - -EEXIST is returned if the specified device name of protocol ID is
* already in use.
*
* - -EAGAIN is returned if some /proc entry cannot be created.
*
* Environments:
*
* This service can be called from:
*
* - Kernel module initialization/cleanup code
*
* Rescheduling: never.
*/
int rtdm_dev_register(struct rtdm_device *device)
{
int hashkey;
spl_t s;
struct list_head *entry;
struct rtdm_device *existing_dev;
int ret;
/* Catch unsuccessful initialisation */
if (!rtdm_initialised)
return -ENOSYS;
/* Sanity check: structure version */
RTAI_ASSERT(RTDM, device->struct_version == RTDM_DEVICE_STRUCT_VER,
xnlogerr("RTDM: invalid rtdm_device version (%d, "
"required %d)\n", device->struct_version,
RTDM_DEVICE_STRUCT_VER);
return -EINVAL;);
/* Sanity check: proc_name specified? */
RTAI_ASSERT(RTDM, device->proc_name,
xnlogerr("RTDM: no /proc entry name specified\n");
return -EINVAL;);
switch (device->device_flags & RTDM_DEVICE_TYPE_MASK) {
case RTDM_NAMED_DEVICE:
/* Sanity check: any open handler? */
RTAI_ASSERT(RTDM, ANY_HANDLER(*device, open),
xnlogerr("RTDM: missing open handler\n");
return -EINVAL;);
if (device->open_rt &&
device->socket_rt != (void *)rtdm_no_support)
xnlogerr("RTDM: RT open handler is deprecated, "
"driver requires update.\n");
SET_DEFAULT_OP_IF_NULL(*device, open);
SET_DEFAULT_OP(*device, socket);
break;
case RTDM_PROTOCOL_DEVICE:
/* Sanity check: any socket handler? */
RTAI_ASSERT(RTDM, ANY_HANDLER(*device, socket),
xnlogerr("RTDM: missing socket handler\n");
return -EINVAL;);
if (device->socket_rt &&
device->socket_rt != (void *)rtdm_no_support)
xnlogerr("RTDM: RT socket creation handler is "
"deprecated, driver requires update.\n");
SET_DEFAULT_OP_IF_NULL(*device, socket);
SET_DEFAULT_OP(*device, open);
break;
default:
return -EINVAL;
}
/* Sanity check: non-RT close handler?
* (Always required for forced cleanup) */
if (!device->ops.close_nrt) {
xnlogerr("RTDM: missing non-RT close handler\n");
return -EINVAL;
}
if (device->ops.close_rt &&
device->ops.close_rt != (void *)rtdm_no_support)
xnlogerr("RTDM: RT close handler is deprecated, driver "
"requires update.\n");
else
device->ops.close_rt = (void *)rtdm_no_support;
SET_DEFAULT_OP_IF_NULL(device->ops, ioctl);
SET_DEFAULT_OP_IF_NULL(device->ops, read);
SET_DEFAULT_OP_IF_NULL(device->ops, write);
SET_DEFAULT_OP_IF_NULL(device->ops, recvmsg);
SET_DEFAULT_OP_IF_NULL(device->ops, sendmsg);
if (!device->ops.select_bind)
device->ops.select_bind = rtdm_select_bind_no_support;
atomic_set(&device->reserved.refcount, 0);
device->reserved.exclusive_context = NULL;
if (device->device_flags & RTDM_EXCLUSIVE) {
device->reserved.exclusive_context =
kmalloc(sizeof(struct rtdm_dev_context) +
device->context_size, GFP_KERNEL);
if (!device->reserved.exclusive_context) {
xnlogerr("RTDM: no memory for exclusive context "
"(context size: %ld)\n",
(long)device->context_size);
return -ENOMEM;
}
/* mark exclusive context as unused */
device->reserved.exclusive_context->device = NULL;
}
down(&nrt_dev_lock);
if ((device->device_flags & RTDM_DEVICE_TYPE_MASK) == RTDM_NAMED_DEVICE) {
trace_mark(xn_rtdm, nameddev_register, "device %p name %s "
"flags %d class %d sub_class %d profile_version %d "
"driver_version %d", device, device->device_name,
device->device_flags, device->device_class,
device->device_sub_class, device->profile_version,
device->driver_version);
hashkey =
get_name_hash(device->device_name, RTDM_MAX_DEVNAME_LEN,
name_hashkey_mask);
list_for_each(entry, &rtdm_named_devices[hashkey]) {
existing_dev =
list_entry(entry, struct rtdm_device,
reserved.entry);
if (strcmp(device->device_name,
existing_dev->device_name) == 0) {
ret = -EEXIST;
goto err;
}
}
ret = rtdm_proc_register_device(device);
if (ret)
goto err;
xnlock_get_irqsave(&rt_dev_lock, s);
list_add_tail(&device->reserved.entry,
&rtdm_named_devices[hashkey]);
xnlock_put_irqrestore(&rt_dev_lock, s);
up(&nrt_dev_lock);
} else {
trace_mark(xn_rtdm, protocol_register, "device %p "
"protocol_family %d socket_type %d flags %d "
"class %d sub_class %d profile_version %d "
"driver_version %d", device,
device->protocol_family, device->socket_type,
device->device_flags, device->device_class,
device->device_sub_class, device->profile_version,
device->driver_version);
hashkey = get_proto_hash(device->protocol_family,
device->socket_type);
list_for_each(entry, &rtdm_protocol_devices[hashkey]) {
existing_dev =
list_entry(entry, struct rtdm_device,
reserved.entry);
if ((device->protocol_family ==
existing_dev->protocol_family)
&& (device->socket_type ==
existing_dev->socket_type)) {
xnlogerr("RTDM: protocol %u:%u already "
"exists\n", device->protocol_family,
device->socket_type);
ret = -EEXIST;
goto err;
}
}
ret = rtdm_proc_register_device(device);
if (ret)
goto err;
xnlock_get_irqsave(&rt_dev_lock, s);
list_add_tail(&device->reserved.entry,
&rtdm_protocol_devices[hashkey]);
xnlock_put_irqrestore(&rt_dev_lock, s);
up(&nrt_dev_lock);
}
return 0;
err:
up(&nrt_dev_lock);
if (device->reserved.exclusive_context)
kfree(device->reserved.exclusive_context);
return ret;
}
EXPORT_SYMBOL(rtdm_dev_register);
/**
* @brief Unregisters a RTDM device
*
* @param[in] device Pointer to structure describing the device to be
* unregistered.
* @param[in] poll_delay Polling delay in milliseconds to check repeatedly for
* open instances of @a device, or 0 for non-blocking mode.
*
* @return 0 is returned upon success. Otherwise:
*
* - -ENODEV is returned if the device was not registered.
*
* - -EAGAIN is returned if the device is busy with open instances and 0 has
* been passed for @a poll_delay.
*
* Environments:
*
* This service can be called from:
*
* - Kernel module initialization/cleanup code
*
* Rescheduling: never.
*/
int rtdm_dev_unregister(struct rtdm_device *device, unsigned int poll_delay)
{
spl_t s;
struct rtdm_device *reg_dev;
unsigned long warned = 0;
if (!rtdm_initialised)
return -ENOSYS;
if ((device->device_flags & RTDM_DEVICE_TYPE_MASK) == RTDM_NAMED_DEVICE)
reg_dev = get_named_device(device->device_name);
else
reg_dev = get_protocol_device(device->protocol_family,
device->socket_type);
if (!reg_dev)
return -ENODEV;
trace_mark(xn_rtdm, dev_unregister, "device %p poll_delay %u",
device, poll_delay);
down(&nrt_dev_lock);
xnlock_get_irqsave(&rt_dev_lock, s);
while (atomic_read(&reg_dev->reserved.refcount) > 1) {
xnlock_put_irqrestore(&rt_dev_lock, s);
up(&nrt_dev_lock);
if (!poll_delay) {
rtdm_dereference_device(reg_dev);
trace_mark(xn_rtdm, dev_busy, "device %p", device);
return -EAGAIN;
}
if (!__test_and_set_bit(0, &warned))
xnlogwarn("RTDM: device %s still in use - waiting for "
"release...\n", reg_dev->device_name);
msleep(poll_delay);
trace_mark(xn_rtdm, dev_poll, "device %p", device);
down(&nrt_dev_lock);
xnlock_get_irqsave(&rt_dev_lock, s);
}
list_del(&reg_dev->reserved.entry);
xnlock_put_irqrestore(&rt_dev_lock, s);
rtdm_proc_unregister_device(device);
up(&nrt_dev_lock);
if (reg_dev->reserved.exclusive_context)
kfree(device->reserved.exclusive_context);
return 0;
}
EXPORT_SYMBOL(rtdm_dev_unregister);
/** @} */
int __init rtdm_dev_init(void)
{
int err, i;
sema_init(&nrt_dev_lock, 1);
rtdm_apc = rthal_apc_alloc("deferred RTDM close", rtdm_apc_handler,
NULL);
if (rtdm_apc < 0)
return rtdm_apc;
name_hashkey_mask = devname_hashtab_size - 1;
proto_hashkey_mask = protocol_hashtab_size - 1;
if (((devname_hashtab_size & name_hashkey_mask) != 0) ||
((protocol_hashtab_size & proto_hashkey_mask) != 0)) {
err = -EINVAL;
goto err_out1;
}
rtdm_named_devices = (struct list_head *)
kmalloc(devname_hashtab_size * sizeof(struct list_head),
GFP_KERNEL);
if (!rtdm_named_devices) {
err = -ENOMEM;
goto err_out1;
}
for (i = 0; i < devname_hashtab_size; i++)
INIT_LIST_HEAD(&rtdm_named_devices[i]);
rtdm_protocol_devices = (struct list_head *)
kmalloc(protocol_hashtab_size * sizeof(struct list_head),
GFP_KERNEL);
if (!rtdm_protocol_devices) {
err = -ENOMEM;
goto err_out2;
}
for (i = 0; i < protocol_hashtab_size; i++)
INIT_LIST_HEAD(&rtdm_protocol_devices[i]);
return 0;
err_out2:
kfree(rtdm_named_devices);
err_out1:
rthal_apc_free(rtdm_apc);
return err;
}
void rtdm_dev_cleanup(void)
{
/*
* Note: no need to flush the cleanup_queue as no device is allowed
* to deregister as long as there are references.
*/
rthal_apc_free(rtdm_apc);
kfree(rtdm_named_devices);
kfree(rtdm_protocol_devices);
}
/*@}*/
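For orientation only: the doc comments in the removed device.c above describe the RTDM registration services. A hedged sketch of how a driver typically used them; the device and handler names are invented, and the handler prototypes follow the Xenomai-2-style RTDM this code was adapted from, so they may not match this tree exactly:

/* Hedged sketch -- not from the repository. */
static int my_open_nrt(struct rtdm_dev_context *context,
                       rtdm_user_info_t *user_info, int oflags)
{
        return 0;
}

static int my_close_nrt(struct rtdm_dev_context *context,
                        rtdm_user_info_t *user_info)
{
        /* A non-RT close handler is mandatory; rtdm_dev_register() above
         * rejects devices without one. */
        return 0;
}

static struct rtdm_device my_device = {
        .struct_version = RTDM_DEVICE_STRUCT_VER,
        .device_flags   = RTDM_NAMED_DEVICE,
        .device_name    = "mydrv0",
        .proc_name      = "mydrv0",
        .open_nrt       = my_open_nrt,
        .ops            = { .close_nrt = my_close_nrt },
};

/* In module init/cleanup:
 *     err = rtdm_dev_register(&my_device);
 *     ...
 *     rtdm_dev_unregister(&my_device, 1000);   // poll every 1000 ms if busy
 */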


@@ -1,67 +0,0 @@
/*
* Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
* Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
*
* with adaptions for RTAI by Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* RTAI is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* RTAI is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with RTAI; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _RTDM_DEVICE_H
#define _RTDM_DEVICE_H
#include <rtdm/rtdm_driver.h>
#include <linux/sem.h>
#define DEF_DEVNAME_HASHTAB_SIZE 256 /* entries in name hash table */
#define DEF_PROTO_HASHTAB_SIZE 256 /* entries in protocol hash table */
extern struct semaphore nrt_dev_lock;
extern xnlock_t rt_dev_lock;
extern unsigned int devname_hashtab_size;
extern unsigned int protocol_hashtab_size;
extern struct list_head *rtdm_named_devices;
extern struct list_head *rtdm_protocol_devices;
#ifdef MODULE
#define rtdm_initialised 1
#else /* !MODULE */
extern int rtdm_initialised;
#endif /* MODULE */
int rtdm_no_support(void);
struct rtdm_device *get_named_device(const char *name);
struct rtdm_device *get_protocol_device(int protocol_family, int socket_type);
static inline void rtdm_dereference_device(struct rtdm_device *device)
{
atomic_dec(&device->reserved.refcount);
}
int __init rtdm_dev_init(void);
static inline void rtdm_dev_cleanup(void)
{
kfree(rtdm_named_devices);
kfree(rtdm_protocol_devices);
}
#endif /* _RTDM_DEVICE_H */

File diff suppressed because it is too large


@@ -1,106 +0,0 @@
/*
* Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
* Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
*
* with adaptions for RTAI by Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* RTAI is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* RTAI is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with RTAI; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _RTDM_INTERNAL_H
#define _RTDM_INTERNAL_H
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/sem.h>
#include <rtdm/rtdm_driver.h>
#define RTDM_FD_MAX CONFIG_RTAI_RTDM_FD_MAX
#define DEF_DEVNAME_HASHTAB_SIZE 256 /* entries in name hash table */
#define DEF_PROTO_HASHTAB_SIZE 256 /* entries in protocol hash table */
struct rtdm_fildes {
struct rtdm_dev_context *context;
};
struct rtdm_process {
#ifdef CONFIG_PROC_FS
char name[32];
pid_t pid;
#endif /* CONFIG_PROC_FS */
};
DECLARE_EXTERN_XNLOCK(rt_fildes_lock);
DECLARE_EXTERN_XNLOCK(rt_dev_lock);
extern int __rtdm_muxid;
extern struct rtdm_fildes fildes_table[];
extern int open_fildes;
extern struct semaphore nrt_dev_lock;
extern unsigned int devname_hashtab_size;
extern unsigned int protocol_hashtab_size;
extern struct list_head *rtdm_named_devices;
extern struct list_head *rtdm_protocol_devices;
#ifdef MODULE
#define rtdm_initialised 1
#else /* !MODULE */
extern int rtdm_initialised;
#endif /* MODULE */
void cleanup_owned_contexts(void *user_info);
int rtdm_no_support(void);
struct rtdm_device *get_named_device(const char *name);
struct rtdm_device *get_protocol_device(int protocol_family, int socket_type);
static inline void rtdm_dereference_device(struct rtdm_device *device)
{
atomic_dec(&device->reserved.refcount);
}
int __init rtdm_dev_init(void);
void rtdm_dev_cleanup(void);
#ifdef CONFIG_PROC_FS
int __init rtdm_proc_init(void);
void rtdm_proc_cleanup(void);
int rtdm_proc_register_device(struct rtdm_device *device);
void rtdm_proc_unregister_device(struct rtdm_device *device);
#else
static inline int rtdm_proc_init(void)
{
return 0;
}
void rtdm_proc_cleanup(void)
{
}
static int rtdm_proc_register_device(struct rtdm_device *device)
{
return 0;
}
static void rtdm_proc_unregister_device(struct rtdm_device *device)
{
}
#endif
void rtdm_apc_handler(void *cookie);
#endif /* _RTDM_INTERNAL_H */


@@ -1,14 +0,0 @@
lib_LTLIBRARIES = librtdm.la
librtdm_la_LDFLAGS = -module -version-info 0:0:0
librtdm_la_SOURCES = \
services.c
AM_CPPFLAGS = \
@RTAI_REAL_USER_CFLAGS@ \
-fno-inline \
-I../../../base/include \
-I$(top_srcdir)/base/include \
-I$(top_srcdir)/addons \
-I$(srcdir)/../..


@@ -1,19 +0,0 @@
/*
* Copyright (C) Pierre Cloutier <pcloutier@PoseidonControls.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the version 2 of the GNU Lesser
* General Public License as published by the Free Software
* Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
#include <rtdm/rtdm.h>


@@ -1,977 +0,0 @@
/**
* @file
* Real-Time Driver Model for RTAI
*
* @note Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
* @note Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
* @note Copyright (C) 2005-2010 Paolo Mantegazza <mantegazza@aero.polimi.it>
* only for the adaption to RTAI.
*
* RTAI is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* RTAI is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with RTAI; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*!
* @defgroup rtdm Real-Time Driver Model
*
* The Real-Time Driver Model (RTDM) provides a unified interface to
* both users and developers of real-time device
* drivers. Specifically, it addresses the constraints of mixed
* RT/non-RT systems like RTAI. RTDM conforms to POSIX
* semantics (IEEE Std 1003.1) where available and applicable.
*/
/*!
* @ingroup rtdm
* @defgroup profiles Device Profiles
*
* Device profiles define which operation handlers a driver of a certain class
* has to implement, which name or protocol it has to register, which IOCTLs
* it has to provide, and further details. Sub-classes can be defined in order
* to extend a device profile with more hardware-specific functions.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <rtdm/rtdm.h>
#include <rtdm/internal.h>
#include <rtdm/rtdm_driver.h>
MODULE_DESCRIPTION("Real-Time Driver Model");
MODULE_AUTHOR("jan.kiszka@web.de");
MODULE_LICENSE("GPL");
static RTAI_SYSCALL_MODE int sys_rtdm_fdcount(void)
{
return RTDM_FD_MAX;
}
static RTAI_SYSCALL_MODE int sys_rtdm_open(const char *path, long oflag)
{
struct task_struct *curr = current;
char krnl_path[RTDM_MAX_DEVNAME_LEN + 1];
if (unlikely(!__xn_access_ok(curr, VERIFY_READ, path, sizeof(krnl_path)))) {
return -EFAULT;
}
__xn_copy_from_user(curr, krnl_path, path, sizeof(krnl_path) - 1);
krnl_path[sizeof(krnl_path) - 1] = '\0';
return __rt_dev_open(curr, (const char *)krnl_path, oflag);
}
static RTAI_SYSCALL_MODE int sys_rtdm_socket(long protocol_family, long socket_type, long protocol)
{
return __rt_dev_socket(current, protocol_family, socket_type, protocol);
}
static RTAI_SYSCALL_MODE int sys_rtdm_close(long fd, long forced)
{
return __rt_dev_close(current, fd);
}
static RTAI_SYSCALL_MODE int sys_rtdm_ioctl(long fd, long request, void *arg)
{
return __rt_dev_ioctl(current, fd, request, arg);
}
static RTAI_SYSCALL_MODE int sys_rtdm_read(long fd, void *buf, long nbytes)
{
return __rt_dev_read(current, fd, buf, nbytes);
}
static RTAI_SYSCALL_MODE int sys_rtdm_write(long fd, void *buf, long nbytes)
{
return __rt_dev_write(current, fd, buf, nbytes);
}
static RTAI_SYSCALL_MODE int sys_rtdm_recvmsg(long fd, struct msghdr *msg, long flags)
{
struct msghdr krnl_msg;
struct task_struct *curr = current;
int ret;
if (unlikely(!__xn_access_ok(curr, VERIFY_WRITE, msg, sizeof(krnl_msg)))) {
return -EFAULT;
}
__xn_copy_from_user(curr, &krnl_msg, msg, sizeof(krnl_msg));
if ((ret = __rt_dev_recvmsg(curr, fd, &krnl_msg, flags)) >= 0) {
__xn_copy_to_user(curr, msg, &krnl_msg, sizeof(krnl_msg));
}
return ret;
}
static RTAI_SYSCALL_MODE int sys_rtdm_sendmsg(long fd, const struct msghdr *msg, long flags)
{
struct msghdr krnl_msg;
struct task_struct *curr = current;
if (unlikely(!__xn_access_ok(curr, VERIFY_READ, msg, sizeof(krnl_msg)))) {
return -EFAULT;
}
__xn_copy_from_user(curr, &krnl_msg, msg, sizeof(krnl_msg));
return __rt_dev_sendmsg(curr, fd, &krnl_msg, flags);
}
static RTAI_SYSCALL_MODE int sys_rtdm_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, nanosecs_rel_t timeout)
{
return rt_dev_select(nfds, rfds, wfds, efds, timeout);
}
static struct rt_fun_entry rtdm[] = {
[__rtdm_fdcount] = { 0, sys_rtdm_fdcount },
[__rtdm_open] = { 0, sys_rtdm_open },
[__rtdm_socket] = { 0, sys_rtdm_socket },
[__rtdm_close] = { 0, sys_rtdm_close },
[__rtdm_ioctl] = { 0, sys_rtdm_ioctl },
[__rtdm_read] = { 0, sys_rtdm_read },
[__rtdm_write] = { 0, sys_rtdm_write },
[__rtdm_recvmsg] = { 0, sys_rtdm_recvmsg },
[__rtdm_sendmsg] = { 0, sys_rtdm_sendmsg },
[__rtdm_select] = { 0, sys_rtdm_select },
};
/* This is needed because RTDM interrupt handlers:
* - do no want immediate in handler rescheduling, RTAI can be configured
* to act in the same, way but might not have been enabled to do so;
* - may not reenable the PIC directly, assuming it will be done here;
* - may not propagate, assuming it will be done here as well.
* - might use shared interrupts its own way;
* REMARK: RTDM irqs management is as generic as its pet system dictates
* and there is no choice but doing the same as closely as possible;
* so this is an as verbatim as possible copy of what is needed from
* the RTDM pet system.
* REMINDER: the RTAI dispatcher cares mask/ack-ing anyhow, but RTDM will
* (must) provide the most suitable one for the shared case. */
#ifndef CONFIG_RTAI_SCHED_ISR_LOCK
extern struct { volatile int locked, rqsted; } rt_scheduling[];
extern void rtai_isr_sched_handle(int);
#define RTAI_SCHED_ISR_LOCK() \
do { \
int cpuid = rtai_cpuid(); \
if (!rt_scheduling[cpuid].locked++) { \
rt_scheduling[cpuid].rqsted = 0; \
}
#define RTAI_SCHED_ISR_UNLOCK() \
rtai_cli(); \
if (rt_scheduling[cpuid].locked && !(--rt_scheduling[cpuid].locked)) { \
if (rt_scheduling[cpuid].rqsted > 0) { \
rtai_isr_sched_handle(cpuid); \
} \
} \
} while (0)
#else /* !CONFIG_RTAI_SCHED_ISR_LOCK */
#define RTAI_SCHED_ISR_LOCK() \
do { } while (0)
#define RTAI_SCHED_ISR_UNLOCK() \
do { rtai_cli(); } while (0)
#endif /* CONFIG_RTAI_SCHED_ISR_LOCK */
#define XNINTR_MAX_UNHANDLED 1000
DEFINE_PRIVATE_XNLOCK(intrlock);
static void xnintr_irq_handler(unsigned irq, void *cookie);
#ifdef CONFIG_RTAI_RTDM_SHIRQ
typedef struct xnintr_irq {
DECLARE_XNLOCK(lock);
xnintr_t *handlers;
int unhandled;
} ____cacheline_aligned_in_smp xnintr_irq_t;
static xnintr_irq_t xnirqs[RTHAL_NR_IRQS];
static inline xnintr_t *xnintr_shirq_first(unsigned irq)
{
return xnirqs[irq].handlers;
}
static inline xnintr_t *xnintr_shirq_next(xnintr_t *prev)
{
return prev->next;
}
/*
* Low-level interrupt handler dispatching the user-defined ISRs for
* shared interrupts -- Called with interrupts off.
*/
static void xnintr_shirq_handler(unsigned irq, void *cookie)
{
xnintr_irq_t *shirq = &xnirqs[irq];
xnintr_t *intr;
int s = 0, ret;
RTAI_SCHED_ISR_LOCK();
xnlock_get(&shirq->lock);
intr = shirq->handlers;
while (intr) {
ret = intr->isr(intr);
s |= ret;
if (ret & XN_ISR_HANDLED) {
xnstat_counter_inc(
&intr->stat[xnsched_cpu(sched)].hits);
}
intr = intr->next;
}
xnlock_put(&shirq->lock);
if (unlikely(s == XN_ISR_NONE)) {
if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
"line.\n", __FUNCTION__, irq);
s |= XN_ISR_NOENABLE;
}
} else
shirq->unhandled = 0;
if (s & XN_ISR_PROPAGATE)
xnarch_chain_irq(irq);
else if (!(s & XN_ISR_NOENABLE))
xnarch_end_irq(irq);
RTAI_SCHED_ISR_UNLOCK();
}
/*
* Low-level interrupt handler dispatching the user-defined ISRs for
* shared edge-triggered interrupts -- Called with interrupts off.
*/
static void xnintr_edge_shirq_handler(unsigned irq, void *cookie)
{
const int MAX_EDGEIRQ_COUNTER = 128;
xnintr_irq_t *shirq = &xnirqs[irq];
int s = 0, counter = 0, ret, code;
struct xnintr *intr, *end = NULL;
RTAI_SCHED_ISR_LOCK();
xnlock_get(&shirq->lock);
intr = shirq->handlers;
while (intr != end) {
ret = intr->isr(intr);
code = ret & ~XN_ISR_BITMASK;
s |= ret;
if (code == XN_ISR_HANDLED) {
end = NULL;
xnstat_counter_inc(
&intr->stat[xnsched_cpu(sched)].hits);
} else if (end == NULL)
end = intr;
if (counter++ > MAX_EDGEIRQ_COUNTER)
break;
if (!(intr = intr->next))
intr = shirq->handlers;
}
xnlock_put(&shirq->lock);
if (counter > MAX_EDGEIRQ_COUNTER)
xnlogerr
("xnintr_edge_shirq_handler() : failed to get the IRQ%d line free.\n",
irq);
if (unlikely(s == XN_ISR_NONE)) {
if (++shirq->unhandled == XNINTR_MAX_UNHANDLED) {
xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
"line.\n", __FUNCTION__, irq);
s |= XN_ISR_NOENABLE;
}
} else
shirq->unhandled = 0;
if (s & XN_ISR_PROPAGATE)
xnarch_chain_irq(irq);
else if (!(s & XN_ISR_NOENABLE))
xnarch_end_irq(irq);
RTAI_SCHED_ISR_UNLOCK();
}
static inline int xnintr_irq_attach(xnintr_t *intr)
{
xnintr_irq_t *shirq = &xnirqs[intr->irq];
xnintr_t *prev, **p = &shirq->handlers;
int err;
if ((prev = *p) != NULL) {
/* Check on whether the shared mode is allowed. */
if (!(prev->flags & intr->flags & XN_ISR_SHARED) ||
(prev->iack != intr->iack)
|| ((prev->flags & XN_ISR_EDGE) !=
(intr->flags & XN_ISR_EDGE)))
return -EBUSY;
/* Get a position at the end of the list to insert the new element. */
while (prev) {
p = &prev->next;
prev = *p;
}
} else {
/* Initialize the corresponding interrupt channel */
void (*handler) (unsigned, void *) = &xnintr_irq_handler;
if (intr->flags & XN_ISR_SHARED) {
if (intr->flags & XN_ISR_EDGE)
handler = &xnintr_edge_shirq_handler;
else
handler = &xnintr_shirq_handler;
}
shirq->unhandled = 0;
err = xnarch_hook_irq(intr->irq, handler,
intr->iack, intr);
if (err)
return err;
}
intr->next = NULL;
/* Add the given interrupt object. No need to synchronise with the IRQ
handler, we are only extending the chain. */
*p = intr;
return 0;
}
static inline int xnintr_irq_detach(xnintr_t *intr)
{
xnintr_irq_t *shirq = &xnirqs[intr->irq];
xnintr_t *e, **p = &shirq->handlers;
int err = 0;
while ((e = *p) != NULL) {
if (e == intr) {
/* Remove the given interrupt object from the list. */
xnlock_get(&shirq->lock);
*p = e->next;
xnlock_put(&shirq->lock);
/* Release the IRQ line if this was the last user */
if (shirq->handlers == NULL)
err = xnarch_release_irq(intr->irq);
return err;
}
p = &e->next;
}
xnlogerr("attempted to detach a non previously attached interrupt "
"object.\n");
return err;
}
#else /* !CONFIG_RTAI_RTDM_SHIRQ */
#ifdef CONFIG_SMP
typedef struct xnintr_irq {
DECLARE_XNLOCK(lock);
} ____cacheline_aligned_in_smp xnintr_irq_t;
static xnintr_irq_t xnirqs[RTHAL_NR_IRQS];
#endif /* CONFIG_SMP */
static inline xnintr_t *xnintr_shirq_first(unsigned irq)
{
return xnarch_get_irq_cookie(irq);
}
static inline xnintr_t *xnintr_shirq_next(xnintr_t *prev)
{
return NULL;
}
static inline int xnintr_irq_attach(xnintr_t *intr)
{
return xnarch_hook_irq(intr->irq, &xnintr_irq_handler,
intr->iack, intr);
}
static inline int xnintr_irq_detach(xnintr_t *intr)
{
int irq = intr->irq, ret;
xnlock_get(&xnirqs[irq].lock);
ret = xnarch_release_irq(irq);
xnlock_put(&xnirqs[irq].lock);
return ret;
}
#endif /* !CONFIG_RTAI_RTDM_SHIRQ */
/*
* Low-level interrupt handler dispatching non-shared ISRs -- Called with
* interrupts off.
*/
static void xnintr_irq_handler(unsigned irq, void *cookie)
{
struct xnintr *intr;
int s;
RTAI_SCHED_ISR_LOCK();
xnlock_get(&xnirqs[irq].lock);
#ifdef CONFIG_SMP
/*
* In SMP case, we have to reload the cookie under the per-IRQ
* lock to avoid racing with xnintr_detach. However, we
* assume that no CPU migration will occur while running the
* interrupt service routine, so the scheduler pointer will
* remain valid throughout this function.
*/
intr = xnarch_get_irq_cookie(irq);
if (unlikely(!intr)) {
s = 0;
goto unlock_and_exit;
}
#else
/* cookie always valid, attach/detach happens with IRQs disabled */
intr = cookie;
#endif
s = intr->isr(intr);
if (unlikely(s == XN_ISR_NONE)) {
if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
xnlogerr("%s: IRQ%d not handled. Disabling IRQ "
"line.\n", __FUNCTION__, irq);
s |= XN_ISR_NOENABLE;
}
} else {
xnstat_counter_inc(&intr->stat[xnsched_cpu(sched)].hits);
intr->unhandled = 0;
}
#ifdef CONFIG_SMP
unlock_and_exit:
#endif
xnlock_put(&xnirqs[irq].lock);
if (s & XN_ISR_PROPAGATE)
xnarch_chain_irq(irq);
else if (!(s & XN_ISR_NOENABLE))
xnarch_end_irq(irq);
RTAI_SCHED_ISR_UNLOCK();
}
int xnintr_mount(void)
{
int i;
for (i = 0; i < RTHAL_NR_IRQS; ++i)
xnlock_init(&xnirqs[i].lock);
return 0;
}
int xnintr_init(xnintr_t *intr,
const char *name,
unsigned irq, xnisr_t isr, xniack_t iack, xnflags_t flags)
{
if (irq >= RTHAL_NR_IRQS)
return -EINVAL;
intr->irq = irq;
intr->isr = isr;
intr->iack = iack;
intr->cookie = NULL;
intr->name = name ? : "<unknown>";
intr->flags = flags;
intr->unhandled = 0;
memset(&intr->stat, 0, sizeof(intr->stat));
#ifdef CONFIG_RTAI_RTDM_SHIRQ
intr->next = NULL;
#endif
return 0;
}
EXPORT_SYMBOL_GPL(xnintr_init);
int xnintr_destroy(xnintr_t *intr)
{
return xnintr_detach(intr);
}
EXPORT_SYMBOL_GPL(xnintr_destroy);
int xnintr_attach(xnintr_t *intr, void *cookie)
{
int ret;
spl_t s;
intr->cookie = cookie;
memset(&intr->stat, 0, sizeof(intr->stat));
#ifdef CONFIG_SMP
xnarch_set_irq_affinity(intr->irq, nkaffinity);
#endif /* CONFIG_SMP */
xnlock_get_irqsave(&intrlock, s);
if (__testbits(intr->flags, XN_ISR_ATTACHED)) {
ret = -EBUSY;
goto out;
}
ret = xnintr_irq_attach(intr);
if (ret)
goto out;
__setbits(intr->flags, XN_ISR_ATTACHED);
out:
xnlock_put_irqrestore(&intrlock, s);
return ret;
}
EXPORT_SYMBOL_GPL(xnintr_attach);
int xnintr_detach(xnintr_t *intr)
{
int ret;
spl_t s;
xnlock_get_irqsave(&intrlock, s);
if (!__testbits(intr->flags, XN_ISR_ATTACHED)) {
ret = -EINVAL;
goto out;
}
__clrbits(intr->flags, XN_ISR_ATTACHED);
ret = xnintr_irq_detach(intr);
if (ret)
goto out;
out:
xnlock_put_irqrestore(&intrlock, s);
return ret;
}
EXPORT_SYMBOL_GPL(xnintr_detach);
int xnintr_enable(xnintr_t *intr)
{
rt_enable_irq(intr->irq);
return 0;
}
EXPORT_SYMBOL_GPL(xnintr_enable);
int xnintr_disable(xnintr_t *intr)
{
rt_disable_irq(intr->irq);
return 0;
}
EXPORT_SYMBOL_GPL(xnintr_disable);
typedef cpumask_t xnarch_cpumask_t;
void xnintr_affinity(xnintr_t *intr, xnarch_cpumask_t cpumask)
{
trace_mark(xn_nucleus, irq_affinity, "irq %u %lu",
intr->irq, *(unsigned long *)&cpumask);
xnarch_set_irq_affinity(intr->irq, cpumask);
}
EXPORT_SYMBOL_GPL(xnintr_affinity);
extern struct epoch_struct boot_epoch;
#ifdef CONFIG_SMP
#define NUM_CPUS RTAI_NR_CPUS
#define TIMED_TIMER_CPUID (timed_timer->cpuid)
#define TIMER_CPUID (timer->cpuid)
#define LIST_CPUID (cpuid)
#else
#define NUM_CPUS 1
#define TIMED_TIMER_CPUID (0)
#define TIMER_CPUID (0)
#define LIST_CPUID (0)
#endif
static struct rtdm_timer_struct timers_list[NUM_CPUS] =
{ { &timers_list[0], &timers_list[0], RT_SCHED_LOWEST_PRIORITY, 0, RT_TIME_END, 0LL, NULL, 0UL,
#ifdef CONFIG_RTAI_LONG_TIMED_LIST
{ NULL }
#endif
}, };
//static spinlock_t timers_lock[NUM_CPUS] = { SPIN_LOCK_UNLOCKED, };
static spinlock_t timers_lock[NUM_CPUS] = { __SPIN_LOCK_UNLOCKED(timers_lock[0]), };
#ifdef CONFIG_RTAI_LONG_TIMED_LIST
/* BINARY TREE */
static inline void enq_timer(struct rtdm_timer_struct *timed_timer)
{
struct rtdm_timer_struct *timerh, *tmrnxt, *timer;
rb_node_t **rbtn, *rbtpn = NULL;
timer = timerh = &timers_list[TIMED_TIMER_CPUID];
rbtn = &timerh->rbr.rb_node;
while (*rbtn) {
rbtpn = *rbtn;
tmrnxt = rb_entry(rbtpn, struct rtdm_timer_struct, rbn);
if (timer->firing_time > tmrnxt->firing_time) {
rbtn = &(rbtpn)->rb_right;
} else {
rbtn = &(rbtpn)->rb_left;
timer = tmrnxt;
}
}
rb_link_node(&timed_timer->rbn, rbtpn, rbtn);
rb_insert_color(&timed_timer->rbn, &timerh->rbr);
timer->prev = (timed_timer->prev = timer->prev)->next = timed_timer;
timed_timer->next = timer;
}
#define rb_erase_timer(timer) \
rb_erase(&(timer)->rbn, &timers_list[NUM_CPUS > 1 ? (timer)->cpuid : 0].rbr)
#else /* !CONFIG_RTAI_LONG_TIMED_LIST */
/* LINEAR */
static inline void enq_timer(struct rtdm_timer_struct *timed_timer)
{
struct rtdm_timer_struct *timer;
timer = &timers_list[TIMED_TIMER_CPUID];
while (timed_timer->firing_time > (timer = timer->next)->firing_time);
timer->prev = (timed_timer->prev = timer->prev)->next = timed_timer;
timed_timer->next = timer;
}
#define rb_erase_timer(timer)
#endif /* CONFIG_RTAI_LONG_TIMED_LIST */
static inline void rem_timer(struct rtdm_timer_struct *timer)
{
(timer->next)->prev = timer->prev;
(timer->prev)->next = timer->next;
timer->next = timer->prev = timer;
rb_erase_timer(timer);
}
static RT_TASK timers_manager[NUM_CPUS];
static inline void asgn_min_prio(int cpuid)
{
RT_TASK *timer_manager;
struct rtdm_timer_struct *timer, *timerl;
spinlock_t *lock;
unsigned long flags;
int priority;
priority = (timer = (timerl = &timers_list[LIST_CPUID])->next)->priority;
flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
while ((timer = timer->next) != timerl) {
if (timer->priority < priority) {
priority = timer->priority;
}
rt_spin_unlock_irqrestore(flags, lock);
flags = rt_spin_lock_irqsave(lock);
}
rt_spin_unlock_irqrestore(flags, lock);
flags = rt_global_save_flags_and_cli();
if ((timer_manager = &timers_manager[LIST_CPUID])->priority > priority) {
timer_manager->priority = priority;
if (timer_manager->state == RT_SCHED_READY) {
rem_ready_task(timer_manager);
enq_ready_task(timer_manager);
}
}
rt_global_restore_flags(flags);
}
RTAI_SYSCALL_MODE int rt_timer_insert(struct rtdm_timer_struct *timer, int priority, RTIME firing_time, RTIME period, void (*handler)(unsigned long), unsigned long data)
{
spinlock_t *lock;
unsigned long flags, cpuid;
RT_TASK *timer_manager;
if (!handler) {
return -EINVAL;
}
timer->handler = handler;
timer->data = data;
timer->priority = priority;
timer->firing_time = firing_time;
timer->period = period;
REALTIME2COUNT(firing_time)
timer->cpuid = cpuid = NUM_CPUS > 1 ? rtai_cpuid() : 0;
// timer insertion in timers_list
flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
enq_timer(timer);
rt_spin_unlock_irqrestore(flags, lock);
// timers_manager priority inheritance
if (timer->priority < (timer_manager = &timers_manager[LIST_CPUID])->priority) {
timer_manager->priority = timer->priority;
}
// timers_task deadline inheritance
flags = rt_global_save_flags_and_cli();
if (timers_list[LIST_CPUID].next == timer && (timer_manager->state & RT_SCHED_DELAYED) && firing_time < timer_manager->resume_time) {
timer_manager->resume_time = firing_time;
rem_timed_task(timer_manager);
enq_timed_task(timer_manager);
rt_schedule();
}
rt_global_restore_flags(flags);
return 0;
}
RTAI_SYSCALL_MODE void rt_timer_remove(struct rtdm_timer_struct *timer)
{
if (timer->next != timer && timer->prev != timer) {
spinlock_t *lock;
unsigned long flags;
flags = rt_spin_lock_irqsave(lock = &timers_lock[TIMER_CPUID]);
rem_timer(timer);
rt_spin_unlock_irqrestore(flags, lock);
asgn_min_prio(TIMER_CPUID);
}
}
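/*
 * Usage sketch (illustrative only, not part of the original module): arm a
 * periodic timer that fires roughly once per millisecond. The handler runs in
 * the context of the per-CPU timers manager task created below. nano2count()
 * is assumed to be available from the RTAI scheduler API to convert
 * nanoseconds into internal count units.
 */
static struct rtdm_timer_struct example_timer;

static void example_timer_handler(unsigned long data)
{
        /* Invoked by rt_timers_manager() each time the timer fires. */
}

static void example_timer_arm(void)
{
        RTIME period = nano2count(1000000); /* 1 ms */

        rt_timer_insert(&example_timer, RT_SCHED_LOWEST_PRIORITY,
                        rt_get_time() + period, period,
                        example_timer_handler, 0);
}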
static int TimersManagerPrio = 0;
RTAI_MODULE_PARM(TimersManagerPrio, int);
static void rt_timers_manager(long cpuid)
{
RTIME now;
RT_TASK *timer_manager;
struct rtdm_timer_struct *tmr, *timer, *timerl;
spinlock_t *lock;
unsigned long flags, timer_tol;
int priority;
timer_manager = &timers_manager[LIST_CPUID];
timerl = &timers_list[LIST_CPUID];
lock = &timers_lock[LIST_CPUID];
timer_tol = tuned.timers_tol[LIST_CPUID];
while (1) {
rt_sleep_until((timerl->next)->firing_time);
now = rt_get_time() + timer_tol;
while (1) {
tmr = timer = timerl;
priority = RT_SCHED_LOWEST_PRIORITY;
flags = rt_spin_lock_irqsave(lock);
while ((tmr = tmr->next)->firing_time <= now) {
if (tmr->priority < priority) {
priority = (timer = tmr)->priority;
}
}
rt_spin_unlock_irqrestore(flags, lock);
if (timer == timerl) {
if (timer_manager->priority > TimersManagerPrio) {
timer_manager->priority = TimersManagerPrio;
}
break;
}
timer_manager->priority = priority;
flags = rt_spin_lock_irqsave(lock);
rem_timer(timer);
if (timer->period) {
timer->firing_time += timer->period;
enq_timer(timer);
}
rt_spin_unlock_irqrestore(flags, lock);
timer->handler(timer->data);
}
asgn_min_prio(LIST_CPUID);
}
}
static int TimersManagerStacksize = 8192;
RTAI_MODULE_PARM(TimersManagerStacksize, int);
static int rtai_timers_init(void)
{
int cpuid;
for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
timers_lock[cpuid] = timers_lock[0];
timers_list[cpuid] = timers_list[0];
timers_list[cpuid].cpuid = cpuid;
timers_list[cpuid].next = timers_list[cpuid].prev = &timers_list[cpuid];
}
for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
rt_task_init_cpuid(&timers_manager[cpuid], rt_timers_manager, cpuid, TimersManagerStacksize, TimersManagerPrio, 0, NULL, cpuid);
rt_task_resume(&timers_manager[cpuid]);
}
return 0;
}
static void rtai_timers_cleanup(void)
{
int cpuid;
for (cpuid = 0; cpuid < num_online_cpus(); cpuid++) {
rt_task_delete(&timers_manager[cpuid]);
}
}
EXPORT_SYMBOL(rt_timer_insert);
EXPORT_SYMBOL(rt_timer_remove);
int __init rtdm_skin_init(void)
{
int err;
rtai_timers_init();
if(set_rt_fun_ext_index(rtdm, RTDM_INDX)) {
printk("LXRT extension %d already in use. Recompile RTDM with a different extension index\n", RTDM_INDX);
return -EACCES;
}
if ((err = rtdm_dev_init())) {
goto fail;
}
xnintr_mount();
#ifdef CONFIG_RTAI_RTDM_SELECT
if (xnselect_mount()) {
goto cleanup_core;
}
#endif
#ifdef CONFIG_PROC_FS
if ((err = rtdm_proc_init())) {
goto cleanup_core;
}
#endif /* CONFIG_PROC_FS */
printk("RTDM started.\n");
return 0;
cleanup_core:
#ifdef CONFIG_RTAI_RTDM_SELECT
xnselect_umount();
#endif
rtdm_dev_cleanup();
#ifdef CONFIG_PROC_FS
rtdm_proc_cleanup();
#endif /* CONFIG_PROC_FS */
fail:
return err;
}
void __exit rtdm_skin_exit(void)
{
#ifdef CONFIG_RTAI_RTDM_SELECT
xnselect_umount();
#endif
rtai_timers_cleanup();
rtdm_dev_cleanup();
reset_rt_fun_ext_index(rtdm, RTDM_INDX);
#ifdef CONFIG_PROC_FS
rtdm_proc_cleanup();
#endif /* CONFIG_PROC_FS */
printk("RTDM stopped.\n");
}
module_init(rtdm_skin_init);
module_exit(rtdm_skin_exit);

@@ -1,445 +0,0 @@
/*
* Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
* Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
*
* adapted to RTAI by Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* RTAI is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* RTAI is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with RTAI; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "rtdm/internal.h"
#include <rtdm/vfile.h>
struct xnvfile_directory rtdm_vfroot; /* /proc/rtai/rtdm */
struct vfile_device_data {
int h;
int hmax;
struct list_head *devmap;
struct list_head *curr;
};
static int get_nrt_lock(struct xnvfile *vfile)
{
return down_interruptible(&nrt_dev_lock) ? -ERESTARTSYS : 0;
}
static void put_nrt_lock(struct xnvfile *vfile)
{
up(&nrt_dev_lock);
}
static struct xnvfile_lock_ops lockops = {
.get = get_nrt_lock,
.put = put_nrt_lock,
};
static struct list_head *next_devlist(struct vfile_device_data *priv)
{
struct list_head *head;
while (priv->h < priv->hmax) {
head = priv->devmap + priv->h;
if (!list_empty(head))
return head;
priv->h++;
}
return NULL;
}
static void *next_dev(struct xnvfile_regular_iterator *it)
{
struct vfile_device_data *priv = xnvfile_iterator_priv(it);
struct list_head *next;
next = priv->curr->next;
seek:
if (next == priv->devmap + priv->h) {
/* Done with the current hash slot, let's progress. */
if (priv->h >= priv->hmax) {
next = NULL; /* all done. */
goto out;
}
priv->h++;
next = next_devlist(priv);
if (next) {
next = next->next; /* skip head. */
goto seek;
}
}
out:
priv->curr = next;
return next;
}
static void *named_begin(struct xnvfile_regular_iterator *it)
{
struct vfile_device_data *priv = xnvfile_iterator_priv(it);
struct list_head *devlist;
loff_t pos = 0;
priv->devmap = rtdm_named_devices;
priv->hmax = devname_hashtab_size;
priv->h = 0;
devlist = next_devlist(priv);
if (devlist == NULL)
return NULL; /* All devlists empty. */
priv->curr = devlist->next; /* Skip head. */
/*
* priv->curr now points to the first device; advance to the requested
* position from there.
*/
while (priv->curr && pos++ < it->pos)
priv->curr = next_dev(it);
if (pos == 1)
/* Output the header once, only if some device follows. */
xnvfile_puts(it, "Hash\tName\t\t\t\tDriver\t\t/proc\n");
return priv->curr;
}
static int named_show(struct xnvfile_regular_iterator *it, void *data)
{
struct vfile_device_data *priv = xnvfile_iterator_priv(it);
struct list_head *curr = data;
struct rtdm_device *device;
device = list_entry(curr, struct rtdm_device, reserved.entry);
xnvfile_printf(it, "%02X\t%-31s\t%-15s\t%s\n",
priv->h, device->device_name,
device->driver_name,
device->proc_name);
return 0;
}
static struct xnvfile_regular_ops named_vfile_ops = {
.begin = named_begin,
.next = next_dev,
.show = named_show,
};
static struct xnvfile_regular named_vfile = {
.privsz = sizeof(struct vfile_device_data),
.ops = &named_vfile_ops,
.entry = { .lockops = &lockops }
};
static void *proto_begin(struct xnvfile_regular_iterator *it)
{
struct vfile_device_data *priv = xnvfile_iterator_priv(it);
struct list_head *devlist;
loff_t pos = 0;
priv->devmap = rtdm_protocol_devices;
priv->hmax = protocol_hashtab_size;
priv->h = 0;
devlist = next_devlist(priv);
if (devlist == NULL)
return NULL; /* All devlists empty. */
priv->curr = devlist->next; /* Skip head. */
/*
* priv->curr now points to the first device; advance to the requested
* position from there.
*/
while (priv->curr && pos++ < it->pos)
priv->curr = next_dev(it);
if (pos == 1)
/* Output the header once, only if some device follows. */
xnvfile_puts(it, "Hash\tName\t\t\t\tDriver\t\t/proc\n");
return priv->curr;
}
static int proto_show(struct xnvfile_regular_iterator *it, void *data)
{
struct vfile_device_data *priv = xnvfile_iterator_priv(it);
struct list_head *curr = data;
struct rtdm_device *device;
char pnum[32];
device = list_entry(curr, struct rtdm_device, reserved.entry);
snprintf(pnum, sizeof(pnum), "%u:%u",
device->protocol_family, device->socket_type);
xnvfile_printf(it, "%02X\t%-31s\t%-15s\t%s\n",
priv->h,
pnum, device->driver_name,
device->proc_name);
return 0;
}
static struct xnvfile_regular_ops proto_vfile_ops = {
.begin = proto_begin,
.next = next_dev,
.show = proto_show,
};
static struct xnvfile_regular proto_vfile = {
.privsz = sizeof(struct vfile_device_data),
.ops = &proto_vfile_ops,
.entry = { .lockops = &lockops }
};
static void *openfd_begin(struct xnvfile_regular_iterator *it)
{
if (it->pos == 0)
return VFILE_SEQ_START;
return it->pos <= RTDM_FD_MAX ? it : NULL;
}
static void *openfd_next(struct xnvfile_regular_iterator *it)
{
if (it->pos > RTDM_FD_MAX)
return NULL;
return it;
}
static int openfd_show(struct xnvfile_regular_iterator *it, void *data)
{
struct rtdm_dev_context *context;
struct rtdm_device *device;
struct rtdm_process owner;
int close_lock_count, fd;
spl_t s;
if (data == NULL) {
xnvfile_puts(it, "Index\tLocked\tDevice\t\t\t\tOwner [PID]\n");
return 0;
}
fd = (int)it->pos - 1;
xnlock_get_irqsave(&rt_fildes_lock, s);
context = fildes_table[fd].context;
if (context == NULL) {
xnlock_put_irqrestore(&rt_fildes_lock, s);
return VFILE_SEQ_SKIP;
}
close_lock_count = atomic_read(&context->close_lock_count);
device = context->device;
if (context->reserved.owner)
memcpy(&owner, context->reserved.owner, sizeof(owner));
else {
strcpy(owner.name, "<kernel>");
owner.pid = -1;
}
xnlock_put_irqrestore(&rt_fildes_lock, s);
xnvfile_printf(it, "%d\t%d\t%-31s %s [%d]\n", fd,
close_lock_count,
(device->device_flags & RTDM_NAMED_DEVICE) ?
device->device_name : device->proc_name,
owner.name, owner.pid);
return 0;
}
static ssize_t openfd_store(struct xnvfile_input *input)
{
ssize_t ret, cret;
long val;
ret = xnvfile_get_integer(input, &val);
if (ret < 0)
return ret;
cret = __rt_dev_close(current, (int)val);
if (cret < 0)
return cret;
return ret;
}
static struct xnvfile_regular_ops openfd_vfile_ops = {
.begin = openfd_begin,
.next = openfd_next,
.show = openfd_show,
.store = openfd_store,
};
static struct xnvfile_regular openfd_vfile = {
.ops = &openfd_vfile_ops,
.entry = { .lockops = &lockops }
};
static int allfd_vfile_show(struct xnvfile_regular_iterator *it, void *data)
{
xnvfile_printf(it, "total=%d:open=%d:free=%d\n", RTDM_FD_MAX,
open_fildes, RTDM_FD_MAX - open_fildes);
return 0;
}
static struct xnvfile_regular_ops allfd_vfile_ops = {
.show = allfd_vfile_show,
};
static struct xnvfile_regular allfd_vfile = {
.ops = &allfd_vfile_ops,
};
static int devinfo_vfile_show(struct xnvfile_regular_iterator *it, void *data)
{
struct rtdm_device *device;
int i;
if (down_interruptible(&nrt_dev_lock))
return -ERESTARTSYS;
/*
* As the device may have disappeared while the handler was called,
* first match the pointer against registered devices.
*/
for (i = 0; i < devname_hashtab_size; i++)
list_for_each_entry(device, &rtdm_named_devices[i],
reserved.entry)
if (device == xnvfile_priv(it->vfile))
goto found;
for (i = 0; i < protocol_hashtab_size; i++)
list_for_each_entry(device, &rtdm_protocol_devices[i],
reserved.entry)
if (device == xnvfile_priv(it->vfile))
goto found;
up(&nrt_dev_lock);
return -ENODEV;
found:
xnvfile_printf(it, "driver:\t\t%s\nversion:\t%d.%d.%d\n",
device->driver_name,
RTDM_DRIVER_MAJOR_VER(device->driver_version),
RTDM_DRIVER_MINOR_VER(device->driver_version),
RTDM_DRIVER_PATCH_VER(device->driver_version));
xnvfile_printf(it, "peripheral:\t%s\nprovider:\t%s\n",
device->peripheral_name, device->provider_name);
xnvfile_printf(it, "class:\t\t%d\nsub-class:\t%d\n",
device->device_class, device->device_sub_class);
xnvfile_printf(it, "flags:\t\t%s%s%s\n",
(device->device_flags & RTDM_EXCLUSIVE) ?
"EXCLUSIVE " : "",
(device->device_flags & RTDM_NAMED_DEVICE) ?
"NAMED_DEVICE " : "",
(device->device_flags & RTDM_PROTOCOL_DEVICE) ?
"PROTOCOL_DEVICE " : "");
xnvfile_printf(it, "lock count:\t%d\n",
atomic_read(&device->reserved.refcount));
up(&nrt_dev_lock);
return 0;
}
static struct xnvfile_regular_ops devinfo_vfile_ops = {
.show = devinfo_vfile_show,
};
int rtdm_proc_register_device(struct rtdm_device *device)
{
int ret;
ret = xnvfile_init_dir(device->proc_name,
&device->vfroot, &rtdm_vfroot);
if (ret)
goto err_out;
memset(&device->info_vfile, 0, sizeof(device->info_vfile));
device->info_vfile.ops = &devinfo_vfile_ops;
ret = xnvfile_init_regular("information", &device->info_vfile,
&device->vfroot);
if (ret) {
xnvfile_destroy_dir(&device->vfroot);
goto err_out;
}
xnvfile_priv(&device->info_vfile) = device;
return 0;
err_out:
xnlogerr("RTDM: error while creating device vfile\n");
return ret;
}
void rtdm_proc_unregister_device(struct rtdm_device *device)
{
xnvfile_destroy_regular(&device->info_vfile);
xnvfile_destroy_dir(&device->vfroot);
}
int __init rtdm_proc_init(void)
{
int ret;
/* Initialise vfiles */
// ret = xnvfile_init_root(); /proc/rtai is initialized elsewhere
ret = xnvfile_init_dir("rtai/rtdm", &rtdm_vfroot, &nkvfroot);
if (ret)
goto error;
ret = xnvfile_init_regular("named_devices", &named_vfile, &rtdm_vfroot);
if (ret)
goto error;
ret = xnvfile_init_regular("protocol_devices", &proto_vfile, &rtdm_vfroot);
if (ret)
goto error;
ret = xnvfile_init_regular("open_fildes", &openfd_vfile, &rtdm_vfroot);
if (ret)
goto error;
ret = xnvfile_init_regular("fildes", &allfd_vfile, &rtdm_vfroot);
if (ret)
goto error;
return 0;
error:
rtdm_proc_cleanup();
return ret;
}
void rtdm_proc_cleanup(void)
{
xnvfile_destroy_regular(&allfd_vfile);
xnvfile_destroy_regular(&openfd_vfile);
xnvfile_destroy_regular(&proto_vfile);
xnvfile_destroy_regular(&named_vfile);
xnvfile_destroy_dir(&rtdm_vfroot);
// xnvfile_destroy_root(); /proc/rtai is destroyed elsewhere
}

@@ -1,32 +0,0 @@
/*
* Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
* Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
*
* RTAI is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* RTAI is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with RTAI; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _RTDM_PROC_H
#define _RTDM_PROC_H
extern struct proc_dir_entry *rtdm_proc_root;
int rtdm_proc_register_device(struct rtdm_device* device);
int __init rtdm_proc_init(void);
void rtdm_proc_cleanup(void);
#endif /* _RTDM_PROC_H */

@@ -1,164 +0,0 @@
/*
* Copyright (C) 2008-2010 Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/module.h>
#include "rtai_taskq.h"
#ifdef CONFIG_SMP
volatile unsigned long tosched_mask;
#endif
void rt_schedule_readied(void)
{
unsigned long flags;
#ifdef CONFIG_SMP
unsigned long cpumask, rmask, lmask;
flags = rt_global_save_flags_and_cli();
lmask = tosched_mask;
tosched_mask = 0;
rt_global_restore_flags(flags);
rmask = lmask & ~(cpumask = (1 << rtai_cpuid()));
if (rmask) {
rtai_save_flags_and_cli(flags);
send_sched_ipi(rmask);
rtai_restore_flags(flags);
}
if (lmask | cpumask)
#endif
{
flags = rt_global_save_flags_and_cli();
rt_schedule();
rt_global_restore_flags(flags);
}
}
EXPORT_SYMBOL(rt_schedule_readied);
void rt_taskq_init(TASKQ *taskq, unsigned long type)
{
taskq->qtype = (type & TASKQ_FIFO) ? 1 : 0;
taskq->queue = (QUEUE) { &taskq->queue, &taskq->queue, NULL };
}
EXPORT_SYMBOL(rt_taskq_init);
RT_TASK *rt_taskq_ready_one(TASKQ *taskq)
{
unsigned long flags;
RT_TASK *task = NULL;
flags = rt_global_save_flags_and_cli();
if ((task = (taskq->queue.next)->task)) {
dequeue_blocked(task);
rem_timed_task(task);
if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_TASKQ | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
enq_ready_task(task);
TOSCHED_TASK(task);
}
}
rt_global_restore_flags(flags);
return task;
}
EXPORT_SYMBOL(rt_taskq_ready_one);
int rt_taskq_ready_all(TASKQ *taskq, unsigned long why)
{
unsigned long flags, tosched;
RT_TASK *task;
QUEUE *q;
tosched = 0;
q = &(taskq->queue);
flags = rt_global_save_flags_and_cli();
while ((q = q->next) != &(taskq->queue)) {
if ((task = q->task)) {
dequeue_blocked(task = q->task);
rem_timed_task(task);
task->retval = why;
if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_TASKQ | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
enq_ready_task(task);
TOSCHED_TASK(task);
tosched = 1;
}
}
rt_global_restore_flags(flags);
flags = rt_global_save_flags_and_cli();
}
rt_global_restore_flags(flags);
return tosched;
}
EXPORT_SYMBOL(rt_taskq_ready_all);
void rt_taskq_wait(TASKQ *taskq)
{
RT_TASK *rt_current;
unsigned long flags;
void *retp;
flags = rt_global_save_flags_and_cli();
rt_current = RT_CURRENT;
rt_current->retval = 0;
rt_current->state |= RT_SCHED_TASKQ;
rem_ready_current(rt_current);
enqueue_blocked(rt_current, &taskq->queue, taskq->qtype);
rt_schedule();
if (unlikely((retp = rt_current->blocked_on) != NULL)) {
if (likely(retp != RTP_OBJREM)) {
dequeue_blocked(rt_current);
rt_current->retval = XNBREAK;
} else {
rt_current->prio_passed_to = NULL;
rt_current->retval = XNRMID;
}
}
rt_global_restore_flags(flags);
}
EXPORT_SYMBOL(rt_taskq_wait);
void rt_taskq_wait_until(TASKQ *taskq, RTIME time)
{
DECLARE_RT_CURRENT;
unsigned long flags;
void *retp;
REALTIME2COUNT(time);
flags = rt_global_save_flags_and_cli();
ASSIGN_RT_CURRENT;
rt_current->retval = 0;
rt_current->blocked_on = &taskq->queue;
if ((rt_current->resume_time = time) > rt_time_h) {
rt_current->state |= (RT_SCHED_TASKQ | RT_SCHED_DELAYED);
rem_ready_current(rt_current);
enqueue_blocked(rt_current, &taskq->queue, taskq->qtype);
enq_timed_task(rt_current);
rt_schedule();
}
if (unlikely((retp = rt_current->blocked_on) != NULL)) {
if (likely(retp != RTP_OBJREM)) {
dequeue_blocked(rt_current);
rt_current->retval = retp > RTP_HIGERR ? XNTIMEO : XNBREAK;
} else {
rt_current->prio_passed_to = NULL;
rt_current->retval = XNRMID;
}
}
rt_global_restore_flags(flags);
}
EXPORT_SYMBOL(rt_taskq_wait_until);

File diff suppressed because it is too large.

@@ -1,522 +0,0 @@
/**
* @file
* Real-Time Driver Model for RTAI, serial device profile header
*
* @note Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
*
* with adaptions for RTAI by Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* RTAI is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* RTAI is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with RTAI; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* @ingroup rtserial
*/
/*!
* @ingroup profiles
* @defgroup rtserial Serial Devices
*
* This is the common interface an RTDM-compliant serial device has to provide.
* Feel free to comment on this profile via the RTAI mailing list
* (rtai@rtai.org) or directly to the author (jan.kiszka@web.de).
*
* @b Profile @b Revision: 3
* @n
* @n
* @par Device Characteristics
* @ref rtdm_device.device_flags "Device Flags": @c RTDM_NAMED_DEVICE, @c RTDM_EXCLUSIVE @n
* @n
* @ref rtdm_device.device_name "Device Name": @c "rtser<N>", N >= 0 @n
* @n
* @ref rtdm_device.device_class "Device Class": @c RTDM_CLASS_SERIAL @n
* @n
*
* @par Supported Operations
* @b Open @n
* Environments: non-RT (RT optional, deprecated)@n
* Specific return values: none @n
* @n
* @b Close @n
* Environments: non-RT (RT optional, deprecated)@n
* Specific return values: none @n
* @n
* @b IOCTL @n
* Mandatory Environments: see @ref SERIOCTLs "below" @n
* Specific return values: see @ref SERIOCTLs "below" @n
* @n
* @b Read @n
* Environments: RT (non-RT optional)@n
* Specific return values:
* - -ETIMEDOUT
* - -EINTR (interrupted explicitly or by signal)
* - -EAGAIN (no data available in non-blocking mode)
* - -EBADF (device has been closed while reading)
* - -EIO (hardware error or broken bit stream)
* .
* @n
* @b Write @n
* Environments: RT (non-RT optional)@n
* Specific return values:
* - -ETIMEDOUT
* - -EINTR (interrupted explicitly or by signal)
* - -EAGAIN (no data written in non-blocking mode)
* - -EBADF (device has been closed while writing)
*
* @{
*/
#ifndef _RTSERIAL_H
#define _RTSERIAL_H
#include <rtdm/rtdm.h>
#define RTSER_PROFILE_VER 3
/*!
* @anchor RTSER_DEF_BAUD @name RTSER_DEF_BAUD
* Default baud rate
* @{ */
#define RTSER_DEF_BAUD 9600
/** @} */
/*!
* @anchor RTSER_xxx_PARITY @name RTSER_xxx_PARITY
* Number of parity bits
* @{ */
#define RTSER_NO_PARITY 0x00
#define RTSER_ODD_PARITY 0x01
#define RTSER_EVEN_PARITY 0x03
#define RTSER_DEF_PARITY RTSER_NO_PARITY
/** @} */
/*!
* @anchor RTSER_xxx_BITS @name RTSER_xxx_BITS
* Number of data bits
* @{ */
#define RTSER_5_BITS 0x00
#define RTSER_6_BITS 0x01
#define RTSER_7_BITS 0x02
#define RTSER_8_BITS 0x03
#define RTSER_DEF_BITS RTSER_8_BITS
/** @} */
/*!
* @anchor RTSER_xxx_STOPB @name RTSER_xxx_STOPB
* Number of stop bits
* @{ */
#define RTSER_1_STOPB 0x00
/** valid only in combination with 5 data bits */
#define RTSER_1_5_STOPB 0x01
#define RTSER_2_STOPB 0x01
#define RTSER_DEF_STOPB RTSER_1_STOPB
/** @} */
/*!
* @anchor RTSER_xxx_HAND @name RTSER_xxx_HAND
* Handshake mechanisms
* @{ */
#define RTSER_NO_HAND 0x00
#define RTSER_RTSCTS_HAND 0x01
#define RTSER_DEF_HAND RTSER_NO_HAND
/** @} */
/*!
* @anchor RTSER_RS485_xxx @name RTSER_RS485_xxx
* RS485 mode with automatic RTS handling
* @{ */
#define RTSER_RS485_DISABLE 0x00
#define RTSER_RS485_ENABLE 0x01
#define RTSER_DEF_RS485 RTSER_RS485_DISABLE
/** @} */
/*!
* @anchor RTSER_FIFO_xxx @name RTSER_FIFO_xxx
* Reception FIFO interrupt threshold
* @{ */
#define RTSER_FIFO_DEPTH_1 0x00
#define RTSER_FIFO_DEPTH_4 0x40
#define RTSER_FIFO_DEPTH_8 0x80
#define RTSER_FIFO_DEPTH_14 0xC0
#define RTSER_DEF_FIFO_DEPTH RTSER_FIFO_DEPTH_1
/** @} */
/*!
* @anchor RTSER_TIMEOUT_xxx @name RTSER_TIMEOUT_xxx
* Special timeout values, see also @ref RTDM_TIMEOUT_xxx
* @{ */
#define RTSER_TIMEOUT_INFINITE RTDM_TIMEOUT_INFINITE
#define RTSER_TIMEOUT_NONE RTDM_TIMEOUT_NONE
#define RTSER_DEF_TIMEOUT RTDM_TIMEOUT_INFINITE
/** @} */
/*!
* @anchor RTSER_xxx_TIMESTAMP_HISTORY @name RTSER_xxx_TIMESTAMP_HISTORY
* Timestamp history control
* @{ */
#define RTSER_RX_TIMESTAMP_HISTORY 0x01
#define RTSER_DEF_TIMESTAMP_HISTORY 0x00
/** @} */
/*!
* @anchor RTSER_EVENT_xxx @name RTSER_EVENT_xxx
* Events bits
* @{ */
#define RTSER_EVENT_RXPEND 0x01
#define RTSER_EVENT_ERRPEND 0x02
#define RTSER_EVENT_MODEMHI 0x04
#define RTSER_EVENT_MODEMLO 0x08
#define RTSER_EVENT_TXEMPTY 0x10
#define RTSER_DEF_EVENT_MASK 0x00
/** @} */
/*!
* @anchor RTSER_SET_xxx @name RTSER_SET_xxx
* Configuration mask bits
* @{ */
#define RTSER_SET_BAUD 0x0001
#define RTSER_SET_PARITY 0x0002
#define RTSER_SET_DATA_BITS 0x0004
#define RTSER_SET_STOP_BITS 0x0008
#define RTSER_SET_HANDSHAKE 0x0010
#define RTSER_SET_FIFO_DEPTH 0x0020
#define RTSER_SET_TIMEOUT_RX 0x0100
#define RTSER_SET_TIMEOUT_TX 0x0200
#define RTSER_SET_TIMEOUT_EVENT 0x0400
#define RTSER_SET_TIMESTAMP_HISTORY 0x0800
#define RTSER_SET_EVENT_MASK 0x1000
#define RTSER_SET_RS485 0x2000
/** @} */
/*!
* @anchor RTSER_LSR_xxx @name RTSER_LSR_xxx
* Line status bits
* @{ */
#define RTSER_LSR_DATA 0x01
#define RTSER_LSR_OVERRUN_ERR 0x02
#define RTSER_LSR_PARITY_ERR 0x04
#define RTSER_LSR_FRAMING_ERR 0x08
#define RTSER_LSR_BREAK_IND 0x10
#define RTSER_LSR_THR_EMTPY 0x20
#define RTSER_LSR_TRANSM_EMPTY 0x40
#define RTSER_LSR_FIFO_ERR 0x80
#define RTSER_SOFT_OVERRUN_ERR 0x0100
/** @} */
/*!
* @anchor RTSER_MSR_xxx @name RTSER_MSR_xxx
* Modem status bits
* @{ */
#define RTSER_MSR_DCTS 0x01
#define RTSER_MSR_DDSR 0x02
#define RTSER_MSR_TERI 0x04
#define RTSER_MSR_DDCD 0x08
#define RTSER_MSR_CTS 0x10
#define RTSER_MSR_DSR 0x20
#define RTSER_MSR_RI 0x40
#define RTSER_MSR_DCD 0x80
/** @} */
/*!
* @anchor RTSER_MCR_xxx @name RTSER_MCR_xxx
* Modem control bits
* @{ */
#define RTSER_MCR_DTR 0x01
#define RTSER_MCR_RTS 0x02
#define RTSER_MCR_OUT1 0x04
#define RTSER_MCR_OUT2 0x08
#define RTSER_MCR_LOOP 0x10
/** @} */
/*!
* @anchor RTSER_BREAK_xxx @name RTSER_BREAK_xxx
* Break control
* @{ */
#define RTSER_BREAK_CLR 0x00
#define RTSER_BREAK_SET 0x01
/**
* Serial device configuration
*/
typedef struct rtser_config {
/** mask specifying valid fields, see @ref RTSER_SET_xxx */
int config_mask;
/** baud rate, default @ref RTSER_DEF_BAUD */
int baud_rate;
/** number of parity bits, see @ref RTSER_xxx_PARITY */
int parity;
/** number of data bits, see @ref RTSER_xxx_BITS */
int data_bits;
/** number of stop bits, see @ref RTSER_xxx_STOPB */
int stop_bits;
/** handshake mechanisms, see @ref RTSER_xxx_HAND */
int handshake;
/** reception FIFO interrupt threshold, see @ref RTSER_FIFO_xxx */
int fifo_depth;
int reserved;
/** reception timeout, see @ref RTSER_TIMEOUT_xxx for special
* values */
nanosecs_rel_t rx_timeout;
/** transmission timeout, see @ref RTSER_TIMEOUT_xxx for special
* values */
nanosecs_rel_t tx_timeout;
/** event timeout, see @ref RTSER_TIMEOUT_xxx for special values */
nanosecs_rel_t event_timeout;
/** enable timestamp history, see @ref RTSER_xxx_TIMESTAMP_HISTORY */
int timestamp_history;
/** event mask to be used with @ref RTSER_RTIOC_WAIT_EVENT, see
* @ref RTSER_EVENT_xxx */
int event_mask;
/** enable RS485 mode, see @ref RTSER_RS485_xxx */
int rs485;
} rtser_config_t;
/**
* Serial device status
*/
typedef struct rtser_status {
/** line status register, see @ref RTSER_LSR_xxx */
int line_status;
/** modem status register, see @ref RTSER_MSR_xxx */
int modem_status;
} rtser_status_t;
/**
* Additional information about serial device events
*/
typedef struct rtser_event {
/** signalled events, see @ref RTSER_EVENT_xxx */
int events;
/** number of pending input characters */
int rx_pending;
/** last interrupt timestamp */
nanosecs_abs_t last_timestamp;
/** reception timestamp of oldest character in input queue */
nanosecs_abs_t rxpend_timestamp;
} rtser_event_t;
#define RTIOC_TYPE_SERIAL RTDM_CLASS_SERIAL
/*!
* @name Sub-Classes of RTDM_CLASS_SERIAL
* @{ */
#define RTDM_SUBCLASS_16550A 0
/** @} */
/*!
* @anchor SERIOCTLs @name IOCTLs
* Serial device IOCTLs
* @{ */
/**
* Get serial device configuration
*
* @param[out] arg Pointer to configuration buffer (struct rtser_config)
*
* @return 0 on success, otherwise negative error code
*
* Environments:
*
* This service can be called from:
*
* - Kernel module initialization/cleanup code
* - Kernel-based task
* - User-space task (RT, non-RT)
*
* Rescheduling: never.
*/
#define RTSER_RTIOC_GET_CONFIG \
_IOR(RTIOC_TYPE_SERIAL, 0x00, struct rtser_config)
/**
* Set serial device configuration
*
* @param[in] arg Pointer to configuration buffer (struct rtser_config)
*
* @return 0 on success, otherwise:
*
* - -EPERM is returned if the caller's context is invalid, see note below.
*
* - -ENOMEM is returned if a new history buffer for timestamps cannot be
* allocated.
*
* Environments:
*
* This service can be called from:
*
* - Kernel module initialization/cleanup code
* - Kernel-based task
* - User-space task (RT, non-RT)
*
* @note If rtser_config contains a valid timestamp_history and the
* addressed device has been opened in non-real-time context, this IOCTL must
* be issued in non-real-time context as well. Otherwise, this command will
* fail.
*
* Rescheduling: never.
*/
#define RTSER_RTIOC_SET_CONFIG \
_IOW(RTIOC_TYPE_SERIAL, 0x01, struct rtser_config)
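/*
 * Illustrative sketch (not part of the profile itself): reconfigure the baud
 * rate of an already registered "rtser0" device from kernel code. It assumes
 * the usual RTDM device-call helpers rt_dev_open(), rt_dev_ioctl() and
 * rt_dev_close() are visible to the caller.
 */
static inline int rtser_example_set_baud(int baud)
{
        struct rtser_config config = {
                .config_mask = RTSER_SET_BAUD,
                .baud_rate = baud,
        };
        int fd, err;

        fd = rt_dev_open("rtser0", 0);
        if (fd < 0)
                return fd;
        err = rt_dev_ioctl(fd, RTSER_RTIOC_SET_CONFIG, &config);
        rt_dev_close(fd);
        return err;
}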
/**
* Get serial device status
*
* @param[out] arg Pointer to status buffer (struct rtser_status)
*
* @return 0 on success, otherwise negative error code
*
* Environments:
*
* This service can be called from:
*
* - Kernel module initialization/cleanup code
* - Kernel-based task
* - User-space task (RT, non-RT)
*
* @note The error states @c RTSER_LSR_OVERRUN_ERR, @c RTSER_LSR_PARITY_ERR,
* @c RTSER_LSR_FRAMING_ERR, and @c RTSER_SOFT_OVERRUN_ERR that may have
* occurred during previous read accesses to the device are saved so that they
* can be reported via this IOCTL. Upon return from @c RTSER_RTIOC_GET_STATUS,
* the saved state will be cleared.
*
* Rescheduling: never.
*/
#define RTSER_RTIOC_GET_STATUS \
_IOR(RTIOC_TYPE_SERIAL, 0x02, struct rtser_status)
/**
* Get serial device's modem control register
*
* @param[out] arg Pointer to variable receiving the content (int, see
* @ref RTSER_MCR_xxx)
*
* @return 0 on success, otherwise negative error code
*
* Environments:
*
* This service can be called from:
*
* - Kernel module initialization/cleanup code
* - Kernel-based task
* - User-space task (RT, non-RT)
*
* Rescheduling: never.
*/
#define RTSER_RTIOC_GET_CONTROL \
_IOR(RTIOC_TYPE_SERIAL, 0x03, int)
/**
* Set serial device's modem control register
*
* @param[in] arg New control register content (int, see @ref RTSER_MCR_xxx)
*
* @return 0 on success, otherwise negative error code
*
* Environments:
*
* This service can be called from:
*
* - Kernel module initialization/cleanup code
* - Kernel-based task
* - User-space task (RT, non-RT)
*
* Rescheduling: never.
*/
#define RTSER_RTIOC_SET_CONTROL \
_IOW(RTIOC_TYPE_SERIAL, 0x04, int)
/**
* Wait on serial device events according to previously set mask
*
* @param[out] arg Pointer to event information buffer (struct rtser_event)
*
* @return 0 on success, otherwise:
*
* - -EBUSY is returned if another task is already waiting on events of this
* device.
*
* - -EBADF is returned if the file descriptor is invalid or the device has
* just been closed.
*
* Environments:
*
* This service can be called from:
*
* - Kernel-based task
* - User-space task (RT)
*
* Rescheduling: possible.
*/
#define RTSER_RTIOC_WAIT_EVENT \
_IOR(RTIOC_TYPE_SERIAL, 0x05, struct rtser_event)
/** @} */
/**
* Set or clear break on UART output line
*
* @param[in] arg @c RTSER_BREAK_SET or @c RTSER_BREAK_CLR (int)
*
* @return 0 on success, otherwise negative error code
*
* Environments:
*
* This service can be called from:
*
* - Kernel module initialization/cleanup code
* - Kernel-based task
* - User-space task (RT, non-RT)
*
* @note A set break condition may also be cleared on UART line
* reconfiguration.
*
* Rescheduling: never.
*/
#define RTSER_RTIOC_BREAK_CTL \
_IOR(RTIOC_TYPE_SERIAL, 0x06, int)
/** @} */
/*!
* @anchor SERutils @name RT Serial example and utility programs
* @{ */
/** @example cross-link.c */
/** @} */
/** @} */
#endif /* _RTSERIAL_H */

@@ -1,466 +0,0 @@
/*!\file select.c
* \brief file descriptors events multiplexing.
* \author Gilles Chanteperdrix
*
* Copyright (C) 2008 Efixo <gilles.chanteperdrix@laposte.net>
*
* with adaptions for RTAI by Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* RTAI is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License,
* or (at your option) any later version.
*
* RTAI is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with RTAI; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*
* \ingroup select
*/
/*!
* \ingroup nucleus
* \defgroup select File descriptors events multiplexing services.
*
* File descriptors events multiplexing services.
*
* This module implements the services needed for implementing the POSIX
* "select" service, or any other events multiplexing services.
*
* Following the implementation of the POSIX select service, this module defines
* three types of events:
* - \a XNSELECT_READ meaning that a file descriptor is ready for reading;
* - \a XNSELECT_WRITE meaning that a file descriptor is ready for writing;
* - \a XNSELECT_EXCEPT meaning that a file descriptor received an exceptional
* event.
*
* It works by defining two structures:
* - a @a struct @a xnselect structure, which should be added to every file
* descriptor for every event type (read, write, or except);
* - a @a struct @a xnselector structure, the selection structure, passed by
* the thread calling the xnselect service, where this service does all its
* housekeeping.
*@{*/
#include "select.h"
#ifdef CONFIG_RTAI_RTDM_SELECT
#include <linux/types.h>
#include <linux/bitops.h> /* For hweight_long */
static xnqueue_t xnselectors;
static int xnselect_apc;
#define link2binding(baddr, memb) \
container_of(baddr, struct xnselect_binding, memb)
/**
* Initialize a @a struct @a xnselect structure.
*
* This service must be called to initialize a @a struct @a xnselect structure
* before it is bound to a selector by means of xnselect_bind().
*
* @param select_block pointer to the xnselect structure to be initialized
*/
void xnselect_init(struct xnselect *select_block)
{
initq(&select_block->bindings);
}
EXPORT_SYMBOL_GPL(xnselect_init);
static inline int xnselect_wakeup(struct xnselector *selector)
{
return xnsynch_flush(&selector->synchbase, 0) == XNSYNCH_RESCHED;
}
/**
* Bind a file descriptor (represented by its @a xnselect structure) to a
* selector block.
*
* @param select_block pointer to the @a struct @a xnselect to be bound;
*
* @param binding pointer to a newly allocated (using xnmalloc) @a struct
* @a xnselect_binding;
*
* @param selector pointer to the selector structure;
*
* @param type type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
* XNSELECT_EXCEPT);
*
* @param index index of the file descriptor (represented by @a select_block) in the bit fields used by the @a selector structure;
*
* @param state current state of the file descriptor.
*
* @a select_block must have been initialized with xnselect_init(),
* the @a xnselector structure must have been initialized with
* xnselector_init(), @a binding may be uninitialized.
*
* This service must be called with nklock locked, irqs off. For this reason,
* the @a binding parameter must have been allocated by the caller outside the
* locking section.
*
* @retval -EINVAL if @a type or @a index is invalid;
* @retval 0 otherwise.
*/
int xnselect_bind(struct xnselect *select_block,
struct xnselect_binding *binding,
struct xnselector *selector,
unsigned type,
unsigned index,
unsigned state)
{
if (type >= XNSELECT_MAX_TYPES || index > __FD_SETSIZE)
return -EINVAL;
binding->selector = selector;
binding->fd = select_block;
binding->type = type;
binding->bit_index = index;
inith(&binding->link);
inith(&binding->slink);
appendq(&selector->bindings, &binding->slink);
appendq(&select_block->bindings, &binding->link);
__FD_SET__(index, &selector->fds[type].expected);
if (state) {
__FD_SET__(index, &selector->fds[type].pending);
if (xnselect_wakeup(selector))
xnpod_schedule();
} else
__FD_CLR__(index, &selector->fds[type].pending);
return 0;
}
EXPORT_SYMBOL_GPL(xnselect_bind);
/* Must be called with nklock locked irqs off */
int __xnselect_signal(struct xnselect *select_block, unsigned state)
{
xnholder_t *holder;
int resched;
for(resched = 0, holder = getheadq(&select_block->bindings);
holder; holder = nextq(&select_block->bindings, holder)) {
struct xnselect_binding *binding;
struct xnselector *selector;
binding = link2binding(holder, link);
selector = binding->selector;
if (state) {
if (!__FD_ISSET__(binding->bit_index,
&selector->fds[binding->type].pending)) {
__FD_SET__(binding->bit_index,
&selector->fds[binding->type].pending);
if (xnselect_wakeup(selector))
resched = 1;
}
} else
__FD_CLR__(binding->bit_index,
&selector->fds[binding->type].pending);
}
return resched;
}
EXPORT_SYMBOL_GPL(__xnselect_signal);
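/*
 * Driver-side sketch (illustrative only, using a hypothetical "exdev"
 * device): keep one struct xnselect per event type, bind it when a selector
 * asks for it, and signal it when data becomes available. As documented
 * above, both xnselect_bind() and __xnselect_signal() must be called with
 * nklock held and interrupts off, while the binding is allocated outside the
 * locked section.
 */
struct exdev {
        struct xnselect read_block; /* set up once with xnselect_init() */
        int rx_ready;               /* non-zero when input is pending */
};

static int exdev_select_bind(struct exdev *dev, struct xnselector *selector,
                             unsigned type, unsigned index)
{
        struct xnselect_binding *binding;
        spl_t s;
        int err;

        if (type != XNSELECT_READ)
                return -EINVAL;

        binding = xnmalloc(sizeof(*binding));
        if (binding == NULL)
                return -ENOMEM;

        xnlock_get_irqsave(&nklock, s);
        err = xnselect_bind(&dev->read_block, binding, selector,
                            type, index, dev->rx_ready);
        xnlock_put_irqrestore(&nklock, s);
        if (err)
                xnfree(binding);
        return err;
}

static void exdev_data_arrived(struct exdev *dev)
{
        spl_t s;

        xnlock_get_irqsave(&nklock, s);
        dev->rx_ready = 1;
        if (__xnselect_signal(&dev->read_block, 1))
                xnpod_schedule();
        xnlock_put_irqrestore(&nklock, s);
}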
/**
* Destroy the @a xnselect structure associated with a file descriptor.
*
* Any binding with a @a xnselector block is destroyed.
*
* @param select_block pointer to the @a xnselect structure associated with a file descriptor
*/
void xnselect_destroy(struct xnselect *select_block)
{
xnholder_t *holder;
int resched = 0;
spl_t s;
xnlock_get_irqsave(&nklock, s);
while ((holder = getq(&select_block->bindings))) {
struct xnselect_binding *binding;
struct xnselector *selector;
binding = link2binding(holder, link);
selector = binding->selector;
__FD_CLR__(binding->bit_index,
&selector->fds[binding->type].expected);
if (!__FD_ISSET__(binding->bit_index,
&selector->fds[binding->type].pending)) {
__FD_SET__(binding->bit_index,
&selector->fds[binding->type].pending);
if (xnselect_wakeup(selector))
resched = 1;
}
removeq(&selector->bindings, &binding->slink);
xnlock_put_irqrestore(&nklock, s);
xnfree(binding);
xnlock_get_irqsave(&nklock, s);
}
if (resched)
xnpod_schedule();
xnlock_put_irqrestore(&nklock, s);
}
EXPORT_SYMBOL_GPL(xnselect_destroy);
static unsigned
fd_set_andnot(fd_set *result, fd_set *first, fd_set *second, unsigned n)
{
unsigned i, not_empty = 0;
for (i = 0; i < __FDELT__(n); i++)
if((result->fds_bits[i] =
first->fds_bits[i] & ~(second->fds_bits[i])))
not_empty = 1;
if (i < __FDSET_LONGS__
&& (result->fds_bits[i] =
first->fds_bits[i] & ~(second->fds_bits[i]) & (__FDMASK__(n) - 1)))
not_empty = 1;
return not_empty;
}
static unsigned
fd_set_and(fd_set *result, fd_set *first, fd_set *second, unsigned n)
{
unsigned i, not_empty = 0;
for (i = 0; i < __FDELT__(n); i++)
if((result->fds_bits[i] =
first->fds_bits[i] & second->fds_bits[i]))
not_empty = 1;
if (i < __FDSET_LONGS__
&& (result->fds_bits[i] =
first->fds_bits[i] & second->fds_bits[i] & (__FDMASK__(n) - 1)))
not_empty = 1;
return not_empty;
}
static void fd_set_zeropad(fd_set *set, unsigned n)
{
unsigned i;
i = __FDELT__(n);
if (i < __FDSET_LONGS__)
set->fds_bits[i] &= (__FDMASK__(n) - 1);
for(i++; i < __FDSET_LONGS__; i++)
set->fds_bits[i] = 0;
}
static unsigned fd_set_popcount(fd_set *set, unsigned n)
{
unsigned count = 0, i;
for (i = 0; i < __FDELT__(n); i++)
if (set->fds_bits[i])
count += hweight_long(set->fds_bits[i]);
if (i < __FDSET_LONGS__ && (set->fds_bits[i] & (__FDMASK__(n) - 1)))
count += hweight_long(set->fds_bits[i] & (__FDMASK__(n) - 1));
return count;
}
/**
* Initialize a selector structure.
*
* @param selector The selector structure to be initialized.
*
* @retval 0
*/
int xnselector_init(struct xnselector *selector)
{
unsigned i;
xnsynch_init(&selector->synchbase, XNSYNCH_FIFO, NULL);
for (i = 0; i < XNSELECT_MAX_TYPES; i++) {
__FD_ZERO__(&selector->fds[i].expected);
__FD_ZERO__(&selector->fds[i].pending);
}
initq(&selector->bindings);
return 0;
}
EXPORT_SYMBOL_GPL(xnselector_init);
/**
* Check the state of a number of file descriptors, wait for a state change if
* no descriptor is ready.
*
* @param selector structure to check for pending events
* @param out_fds The set of descriptors with pending events if a strictly positive number is returned, or the set of descriptors not yet bound if -ECHRNG is returned;
* @param in_fds the set of descriptors which events should be checked
* @param nfds the highest-numbered descriptor in any of the @a in_fds sets, plus 1;
* @param timeout the timeout, whose meaning depends on @a timeout_mode; note
* that xnselect() passes @a timeout and @a timeout_mode unchanged to
* xnsynch_sleep_on, so passing a relative value different from XN_INFINITE as a
* timeout with @a timeout_mode set to XN_RELATIVE will cause a longer sleep
* than expected if the sleep is interrupted.
* @param timeout_mode the mode of @a timeout.
*
* @retval -EINVAL if @a nfds is negative;
* @retval -ECHRNG if some of the descriptors passed in @a in_fds have not yet
* been registered with xnselect_bind(), @a out_fds contains the set of such
* descriptors;
* @retval -EINTR if @a xnselect was interrupted while waiting;
* @retval 0 in case of timeout.
* @retval the number of file descriptors having received an event.
*/
int xnselect(struct xnselector *selector,
fd_set *out_fds[XNSELECT_MAX_TYPES],
fd_set *in_fds[XNSELECT_MAX_TYPES],
int nfds,
xnticks_t timeout, xntmode_t timeout_mode)
{
unsigned i, not_empty = 0;
xnthread_t *thread;
spl_t s;
if ((unsigned) nfds > __FD_SETSIZE)
return -EINVAL;
thread = xnpod_current_thread();
for (i = 0; i < XNSELECT_MAX_TYPES; i++)
if (out_fds[i])
fd_set_zeropad(out_fds[i], nfds);
xnlock_get_irqsave(&nklock, s);
for (i = 0; i < XNSELECT_MAX_TYPES; i++)
if (out_fds[i]
&& fd_set_andnot(out_fds[i], in_fds[i],
&selector->fds[i].expected, nfds))
not_empty = 1;
xnlock_put_irqrestore(&nklock, s);
if (not_empty)
return -ECHRNG;
xnlock_get_irqsave(&nklock, s);
for (i = 0; i < XNSELECT_MAX_TYPES; i++)
if (out_fds[i]
&& fd_set_and(out_fds[i], in_fds[i],
&selector->fds[i].pending, nfds))
not_empty = 1;
while (!not_empty) {
xnsynch_sleep_on(&selector->synchbase, timeout, timeout_mode);
for (i = 0; i < XNSELECT_MAX_TYPES; i++)
if (out_fds[i]
&& fd_set_and(out_fds[i], in_fds[i],
&selector->fds[i].pending, nfds))
not_empty = 1;
if (xnthread_test_info(thread, XNBREAK | XNTIMEO))
break;
}
xnlock_put_irqrestore(&nklock, s);
if (not_empty) {
unsigned count;
for (count = 0, i = 0; i < XNSELECT_MAX_TYPES; i++)
if (out_fds[i])
count += fd_set_popcount(out_fds[i], nfds);
return count;
}
if (xnthread_test_info(thread, XNBREAK))
return -EINTR;
return 0; /* Timeout */
}
EXPORT_SYMBOL_GPL(xnselect);
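/*
 * Caller-side sketch (illustrative only): wait for a single descriptor,
 * identified by its bit index, to become ready for reading. XN_INFINITE and
 * XN_RELATIVE are the nucleus timeout conventions referred to above; unused
 * event types are simply left NULL in both arrays.
 */
static inline int example_wait_readable(struct xnselector *selector, int index)
{
        fd_set in_set, out_set;
        fd_set *in_fds[XNSELECT_MAX_TYPES] = { [XNSELECT_READ] = &in_set };
        fd_set *out_fds[XNSELECT_MAX_TYPES] = { [XNSELECT_READ] = &out_set };

        __FD_ZERO__(&in_set);
        __FD_SET__(index, &in_set);

        return xnselect(selector, out_fds, in_fds, index + 1,
                        XN_INFINITE, XN_RELATIVE);
}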
/**
* Destroy a selector block.
*
* All bindings with file descriptor are destroyed.
*
* @param selector the selector block to be destroyed
*/
void xnselector_destroy(struct xnselector *selector)
{
spl_t s;
inith(&selector->destroy_link);
xnlock_get_irqsave(&nklock, s);
appendq(&xnselectors, &selector->destroy_link);
xnlock_put_irqrestore(&nklock, s);
rthal_apc_schedule(xnselect_apc);
}
EXPORT_SYMBOL_GPL(xnselector_destroy);
static void xnselector_destroy_loop(void *cookie)
{
struct xnselector *selector;
xnholder_t *holder;
int resched;
spl_t s;
xnlock_get_irqsave(&nklock, s);
while ((holder = getq(&xnselectors))) {
selector = container_of(holder, struct xnselector, destroy_link);
while ((holder = getq(&selector->bindings))) {
struct xnselect_binding *binding;
struct xnselect *fd;
binding = link2binding(holder, slink);
fd = binding->fd;
removeq(&fd->bindings, &binding->link);
xnlock_put_irqrestore(&nklock, s);
xnfree(binding);
xnlock_get_irqsave(&nklock, s);
}
resched =
xnsynch_destroy(&selector->synchbase) == XNSYNCH_RESCHED;
xnlock_put_irqrestore(&nklock, s);
xnfree(selector);
if (resched)
xnpod_schedule();
xnlock_get_irqsave(&nklock, s);
}
xnlock_put_irqrestore(&nklock, s);
}
int xnselect_mount(void)
{
initq(&xnselectors);
xnselect_apc = rthal_apc_alloc("xnselectors_destroy",
xnselector_destroy_loop, NULL);
if (xnselect_apc < 0)
return xnselect_apc;
return 0;
}
int xnselect_umount(void)
{
rthal_apc_free(xnselect_apc);
return 0;
}
#endif
/*@}*/

@@ -1,970 +0,0 @@
/**
* @file
* This file is part of the Xenomai project.
*
* @note Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>
*
* adapted to RTAI by Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/*!
* @ingroup nucleus
* @defgroup vfile Virtual file services
*
* Virtual files provide a means to export RTAI object states to
* user-space, based on common kernel interfaces. This encapsulation
* is aimed at:
*
* - supporting consistent collection of very large record-based
* output, without incurring latency peaks for ongoing real-time
* activities.
*
* - in the future, hiding discrepancies between linux kernel
* releases, regarding the proper way to export kernel object states
* to userland, either via the /proc interface or by any other mean.
*
* This virtual file implementation offers record-based read support
* based on seq_files, single-buffer write support, directory and link
* handling, all visible from the /proc namespace.
*
* The vfile support exposes four filesystem object types:
*
* - snapshot-driven file (struct xnvfile_snapshot). This is commonly
* used to export real-time object states via the /proc filesystem. To
* minimize the latency involved in protecting the vfile routines from
* changes applied by real-time code on such objects, a snapshot of
* the data to output is first taken under proper locking, before the
* collected data is formatted and sent out in a lockless manner.
*
* Because a large number of records may have to be output, the data
* collection phase is not strictly atomic as a whole, but only
* protected at record level. The vfile implementation can be notified
* of updates to the underlying data set, and restart the collection
* from scratch until the snapshot is fully consistent.
*
* - regular sequential file (struct xnvfile_regular). This is
* basically an encapsulated sequential file object as available from
* the host kernel (i.e. seq_file), with a few additional features to
* make it more handy in an RTAI environment, like implicit locking
* support and shortened declaration for simplest, single-record
* output.
*
* - virtual link (struct xnvfile_link). This is a symbolic link
* feature integrated with the vfile semantics. The link target is
* computed dynamically at creation time from a user-given helper
* routine.
*
* - virtual directory (struct xnvfile_directory). A directory object,
* which can be used to create a hierarchy for ordering a set of vfile
* objects.
*
*@{*/
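/*
 * A minimal sketch of the "regular sequential file" flavour described above
 * (illustrative only, names are hypothetical). A vfile providing just a
 * show() handler, with no begin()/next(), emits a single record, and
 * xnvfile_init_regular() attaches it under the /proc/rtai root:
 *
 *      static int example_show(struct xnvfile_regular_iterator *it, void *data)
 *      {
 *              xnvfile_printf(it, "hello from /proc/rtai/example\n");
 *              return 0;
 *      }
 *
 *      static struct xnvfile_regular_ops example_ops = {
 *              .show = example_show,
 *      };
 *
 *      static struct xnvfile_regular example_vfile = {
 *              .ops = &example_ops,
 *      };
 *
 * From module init code, call
 * xnvfile_init_regular("example", &example_vfile, &nkvfroot), and
 * xnvfile_destroy_regular(&example_vfile) on cleanup.
 */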
#include <stdarg.h>
#include <linux/ctype.h>
#include <rtdm/vfile.h>
/**
* @var struct xnvfile_directory nkvfroot
* @brief RTAI vfile root directory
*
* This vdir maps the /proc/rtai directory. It can be used to
* create a hierarchy of RTAI-related vfiles under this root.
*/
struct xnvfile_directory nkvfroot;
EXPORT_SYMBOL_GPL(nkvfroot);
static struct xnvfile_directory sysroot;
static void *vfile_snapshot_start(struct seq_file *seq, loff_t *offp)
{
struct xnvfile_snapshot_iterator *it = seq->private;
loff_t pos = *offp;
if (pos > it->nrdata)
return NULL;
if (pos == 0)
return SEQ_START_TOKEN;
return it->databuf + (pos - 1) * it->vfile->datasz;
}
static void *vfile_snapshot_next(struct seq_file *seq, void *v, loff_t *offp)
{
struct xnvfile_snapshot_iterator *it = seq->private;
loff_t pos = *offp;
if (pos >= it->nrdata)
return NULL;
++*offp;
return it->databuf + pos * it->vfile->datasz;
}
static void vfile_snapshot_stop(struct seq_file *seq, void *v)
{
}
static int vfile_snapshot_show(struct seq_file *seq, void *v)
{
struct xnvfile_snapshot_iterator *it = seq->private;
void *data = v == SEQ_START_TOKEN ? NULL : v;
int ret;
ret = it->vfile->ops->show(it, data);
return ret == VFILE_SEQ_SKIP ? SEQ_SKIP : ret;
}
static struct seq_operations vfile_snapshot_ops = {
.start = vfile_snapshot_start,
.next = vfile_snapshot_next,
.stop = vfile_snapshot_stop,
.show = vfile_snapshot_show
};
static void vfile_snapshot_free(struct xnvfile_snapshot_iterator *it, void *buf)
{
kfree(buf);
}
static int vfile_snapshot_open(struct inode *inode, struct file *file)
{
struct xnvfile_snapshot *vfile = PDE_DATA(inode);
struct xnvfile_snapshot_ops *ops = vfile->ops;
struct xnvfile_snapshot_iterator *it;
int revtag, ret, nrdata;
struct seq_file *seq;
caddr_t data;
if ((file->f_mode & FMODE_WRITE) != 0 && ops->store == NULL)
return -EACCES;
/*
* Make sure to create the seq_file backend only when reading
* from the v-file is possible.
*/
if ((file->f_mode & FMODE_READ) == 0) {
file->private_data = NULL;
return 0;
}
if ((file->f_flags & O_EXCL) != 0 && xnvfile_nref(vfile) > 0)
return -EBUSY;
it = kzalloc(sizeof(*it) + vfile->privsz, GFP_KERNEL);
if (it == NULL)
return -ENOMEM;
it->vfile = vfile;
xnvfile_file(vfile) = file;
ret = vfile->entry.lockops->get(&vfile->entry);
if (ret)
goto fail;
redo:
/*
* The ->rewind() method is optional; there may be cases where
* we don't have to take an atomic snapshot of the v-file
* contents before proceeding. In case ->rewind() detects a
* stale backend object, it can force us to bail out.
*
* If present, ->rewind() may return a strictly positive
* value, indicating how many records at most may be returned
* by ->next(). We use this hint to allocate the snapshot
* buffer, in case ->begin() is not provided. The size of this
* buffer would then be vfile->datasz * hint value.
*
* If ->begin() is given, we always expect the latter to do the
* allocation for us regardless of the hint value. Otherwise,
* a NULL return from ->rewind() tells us that the vfile won't
* output any snapshot data via ->show().
*/
nrdata = 0;
if (ops->rewind) {
nrdata = ops->rewind(it);
if (nrdata < 0) {
ret = nrdata;
vfile->entry.lockops->put(&vfile->entry);
goto fail;
}
}
revtag = vfile->tag->rev;
vfile->entry.lockops->put(&vfile->entry);
/* Release the data buffer, in case we had to restart. */
if (it->databuf) {
it->endfn(it, it->databuf);
it->databuf = NULL;
}
/*
* Having no record to output is fine, in which case ->begin()
* shall return VFILE_SEQ_EMPTY if present. ->begin() may be
* absent, meaning that no allocation is even required to
* collect the records to output. NULL is kept for allocation
* errors in all other cases.
*/
if (ops->begin) {
RTAI_BUGON(NUCLEUS, ops->end == NULL);
data = ops->begin(it);
if (data == NULL) {
kfree(it);
return -ENOMEM;
}
if (data != VFILE_SEQ_EMPTY) {
it->databuf = data;
it->endfn = ops->end;
}
} else if (nrdata > 0 && vfile->datasz > 0) {
/* We have a hint for auto-allocation. */
data = kmalloc(vfile->datasz * nrdata, GFP_KERNEL);
if (data == NULL) {
kfree(it);
return -ENOMEM;
}
it->databuf = data;
it->endfn = vfile_snapshot_free;
}
ret = seq_open(file, &vfile_snapshot_ops);
if (ret)
goto fail;
it->nrdata = 0;
data = it->databuf;
if (data == NULL)
goto finish;
/*
* Take a snapshot of the vfile contents, redo if the revision
* tag of the scanned data set changed concurrently.
*/
for (;;) {
ret = vfile->entry.lockops->get(&vfile->entry);
if (ret)
break;
if (vfile->tag->rev != revtag)
goto redo;
ret = ops->next(it, data);
vfile->entry.lockops->put(&vfile->entry);
if (ret <= 0)
break;
if (ret != VFILE_SEQ_SKIP) {
data += vfile->datasz;
it->nrdata++;
}
}
if (ret < 0) {
seq_release(inode, file);
fail:
if (it->databuf)
it->endfn(it, it->databuf);
kfree(it);
return ret;
}
finish:
seq = file->private_data;
it->seq = seq;
seq->private = it;
xnvfile_nref(vfile)++;
return 0;
}
static int vfile_snapshot_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct xnvfile_snapshot_iterator *it;
if (seq) {
it = seq->private;
if (it) {
--xnvfile_nref(it->vfile);
RTAI_BUGON(NUCLEUS, it->vfile->entry.refcnt < 0);
if (it->databuf)
it->endfn(it, it->databuf);
kfree(it);
}
return seq_release(inode, file);
}
return 0;
}
ssize_t vfile_snapshot_write(struct file *file, const char __user *buf,
size_t size, loff_t *ppos)
{
struct xnvfile_snapshot *vfile = PDE_DATA(wrap_f_inode(file));
struct xnvfile_input input;
ssize_t ret;
if (vfile->entry.lockops) {
ret = vfile->entry.lockops->get(&vfile->entry);
if (ret)
return ret;
}
input.u_buf = buf;
input.size = size;
input.vfile = &vfile->entry;
ret = vfile->ops->store(&input);
if (vfile->entry.lockops)
vfile->entry.lockops->put(&vfile->entry);
return ret;
}
static struct file_operations vfile_snapshot_fops = {
.owner = THIS_MODULE,
.open = vfile_snapshot_open,
.read = seq_read,
.write = vfile_snapshot_write,
.llseek = seq_lseek,
.release = vfile_snapshot_release,
};
/**
* @fn int xnvfile_init_snapshot(const char *name, struct xnvfile_snapshot *vfile, struct xnvfile_directory *parent)
* @brief Initialize a snapshot-driven vfile.
*
* @param name The name which should appear in the pseudo-filesystem,
* identifying the vfile entry.
*
* @param vfile A pointer to a vfile descriptor to initialize
* from. The following fields in this structure should be filled in
* prior to calling this routine:
*
* - .privsz is the size (in bytes) of the private data area to be
* reserved in the @ref snapshot_iterator "vfile iterator". A NULL
* value indicates that no private area should be reserved.
*
* - .datasz is the size (in bytes) of a single record to be collected
* by the @ref snapshot_next "next() handler" from the @ref
* snapshot_ops "operation descriptor".
*
* - .tag is a pointer to a mandatory vfile revision tag structure
* (struct xnvfile_rev_tag). This tag will be monitored for changes by
* the vfile core while collecting data to output, so that any update
* detected will cause the current snapshot data to be dropped, and
* the collection to restart from the beginning. To this end, any
* change to the data which may be part of the collected records,
* should also invoke xnvfile_touch() on the associated tag.
*
* - entry.lockops is a pointer to a @ref vfile_lockops "locking
* descriptor", defining the lock and unlock operations for the
* vfile. This pointer may be left to NULL, in which case the
* operations on the nucleus lock (i.e. nklock) will be used
* internally around calls to data collection handlers (see @ref
* snapshot_ops "operation descriptor").
*
* - .ops is a pointer to an @ref snapshot_ops "operation descriptor".
*
* @param parent A pointer to a virtual directory descriptor; the
* vfile entry will be created into this directory. If NULL, the /proc
* root directory will be used. /proc/rtai is mapped on the
* globally available @a nkvfroot vdir.
*
* @return 0 is returned on success. Otherwise:
*
* - -ENOMEM is returned if the virtual file entry cannot be created
* in the /proc hierarchy.
*/
int xnvfile_init_snapshot(const char *name,
struct xnvfile_snapshot *vfile,
struct xnvfile_directory *parent)
{
struct proc_dir_entry *ppde, *pde;
int mode;
RTAI_BUGON(NUCLEUS, vfile->tag == NULL);
if (vfile->entry.lockops == NULL)
/* Defaults to nucleus lock */
vfile->entry.lockops = &xnvfile_nucleus_lock.ops;
if (parent == NULL)
parent = &sysroot;
mode = vfile->ops->store ? 0644 : 0444;
ppde = parent->entry.pde;
pde = proc_create_data(name, mode, ppde, &vfile_snapshot_fops, vfile);
if (pde == NULL)
return -ENOMEM;
wrap_proc_dir_entry_owner(pde);
vfile->entry.pde = pde;
return 0;
}
EXPORT_SYMBOL_GPL(xnvfile_init_snapshot);
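/*
 * Illustrative sketch only, not part of the original file: how a client
 * module might wire a snapshot vfile onto the API above. The "demo"
 * record type, table, tag and entry name are hypothetical; error
 * handling is reduced to the essentials.
 */
struct demo_rec {
	int id;
	long value;
};

struct demo_priv {
	int curr;		/* collection cursor, lives in the iterator */
};

static struct demo_rec demo_table[4];	/* sample data, collected under nklock */
static struct xnvfile_rev_tag demo_tag;	/* touched whenever demo_table changes */

static int demo_rewind(struct xnvfile_snapshot_iterator *it)
{
	struct demo_priv *priv = xnvfile_iterator_priv(it);

	priv->curr = 0;
	/* Number of records next() may return; sizes the internal buffer. */
	return ARRAY_SIZE(demo_table);
}

static int demo_next(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct demo_priv *priv = xnvfile_iterator_priv(it);
	struct demo_rec *p = data;

	if (priv->curr >= ARRAY_SIZE(demo_table))
		return 0;	/* no more records to collect */

	*p = demo_table[priv->curr++];
	return 1;		/* one record stored in *data */
}

static int demo_show(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct demo_rec *p = data;

	if (p == NULL) {	/* NULL on the first call: emit a header */
		xnvfile_printf(it, "%-8s %s\n", "ID", "VALUE");
		return 0;
	}
	xnvfile_printf(it, "%-8d %ld\n", p->id, p->value);
	return 0;
}

static struct xnvfile_snapshot_ops demo_snapshot_ops = {
	.rewind = demo_rewind,
	.next = demo_next,
	.show = demo_show,
};

static struct xnvfile_snapshot demo_vfile = {
	.privsz = sizeof(struct demo_priv),
	.datasz = sizeof(struct demo_rec),
	.tag = &demo_tag,
	.ops = &demo_snapshot_ops,
};

/* From module init, this would create /proc/rtai/demo: */
/* err = xnvfile_init_snapshot("demo", &demo_vfile, NULL); */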
static void *vfile_regular_start(struct seq_file *seq, loff_t *offp)
{
struct xnvfile_regular_iterator *it = seq->private;
struct xnvfile_regular *vfile = it->vfile;
int ret;
it->pos = *offp;
if (vfile->entry.lockops) {
ret = vfile->entry.lockops->get(&vfile->entry);
if (ret)
return ERR_PTR(ret);
}
/*
* If we have no begin() op, we allow a single call only to
* ->show(), by returning the start token once; any subsequent
* call finds a non-zero position and we are done.
*/
if (vfile->ops->begin == NULL)
return it->pos > 0 ? NULL : SEQ_START_TOKEN;
return vfile->ops->begin(it);
}
static void *vfile_regular_next(struct seq_file *seq, void *v, loff_t *offp)
{
struct xnvfile_regular_iterator *it = seq->private;
struct xnvfile_regular *vfile = it->vfile;
void *data;
if (vfile->ops->next == NULL)
return NULL;
it->pos = *offp + 1;
data = vfile->ops->next(it);
if (data == NULL)
return NULL;
*offp = it->pos;
return data;
}
static void vfile_regular_stop(struct seq_file *seq, void *v)
{
struct xnvfile_regular_iterator *it = seq->private;
struct xnvfile_regular *vfile = it->vfile;
if (vfile->entry.lockops)
vfile->entry.lockops->put(&vfile->entry);
if (vfile->ops->end)
vfile->ops->end(it);
}
static int vfile_regular_show(struct seq_file *seq, void *v)
{
struct xnvfile_regular_iterator *it = seq->private;
struct xnvfile_regular *vfile = it->vfile;
void *data = v == SEQ_START_TOKEN ? NULL : v;
int ret;
ret = vfile->ops->show(it, data);
return ret == VFILE_SEQ_SKIP ? SEQ_SKIP : ret;
}
static struct seq_operations vfile_regular_ops = {
.start = vfile_regular_start,
.next = vfile_regular_next,
.stop = vfile_regular_stop,
.show = vfile_regular_show
};
static int vfile_regular_open(struct inode *inode, struct file *file)
{
struct xnvfile_regular *vfile = PDE_DATA(inode);
struct xnvfile_regular_ops *ops = vfile->ops;
struct xnvfile_regular_iterator *it;
struct seq_file *seq;
int ret;
if ((file->f_flags & O_EXCL) != 0 && xnvfile_nref(vfile) > 0)
return -EBUSY;
if ((file->f_mode & FMODE_WRITE) != 0 && ops->store == NULL)
return -EACCES;
if ((file->f_mode & FMODE_READ) == 0) {
file->private_data = NULL;
return 0;
}
it = kzalloc(sizeof(*it) + vfile->privsz, GFP_KERNEL);
if (it == NULL)
return -ENOMEM;
it->vfile = vfile;
it->pos = -1;
xnvfile_file(vfile) = file;
if (ops->rewind) {
ret = ops->rewind(it);
if (ret) {
fail:
kfree(it);
return ret;
}
}
ret = seq_open(file, &vfile_regular_ops);
if (ret)
goto fail;
seq = file->private_data;
it->seq = seq;
seq->private = it;
xnvfile_nref(vfile)++;
return 0;
}
static int vfile_regular_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct xnvfile_regular_iterator *it;
if (seq) {
it = seq->private;
if (it) {
--xnvfile_nref(it->vfile);
RTAI_BUGON(NUCLEUS, xnvfile_nref(it->vfile) < 0);
kfree(it);
}
return seq_release(inode, file);
}
return 0;
}
ssize_t vfile_regular_write(struct file *file, const char __user *buf,
size_t size, loff_t *ppos)
{
struct xnvfile_regular *vfile = PDE_DATA(wrap_f_inode(file));
struct xnvfile_input input;
ssize_t ret;
if (vfile->entry.lockops) {
ret = vfile->entry.lockops->get(&vfile->entry);
if (ret)
return ret;
}
input.u_buf = buf;
input.size = size;
input.vfile = &vfile->entry;
ret = vfile->ops->store(&input);
if (vfile->entry.lockops)
vfile->entry.lockops->put(&vfile->entry);
return ret;
}
static struct file_operations vfile_regular_fops = {
.owner = THIS_MODULE,
.open = vfile_regular_open,
.read = seq_read,
.write = vfile_regular_write,
.llseek = seq_lseek,
.release = vfile_regular_release,
};
/**
* @fn int xnvfile_init_regular(const char *name, struct xnvfile_regular *vfile, struct xnvfile_directory *parent)
* @brief Initialize a regular vfile.
*
* @param name The name which should appear in the pseudo-filesystem,
* identifying the vfile entry.
*
* @param vfile A pointer to a vfile descriptor to initialize
* from. The following fields in this structure should be filled in
* prior to calling this routine:
*
* - .privsz is the size (in bytes) of the private data area to be
* reserved in the @ref regular_iterator "vfile iterator". A zero
* value indicates that no private area should be reserved.
*
* - entry.lockops is a pointer to a @ref vfile_lockops "locking
* descriptor", defining the lock and unlock operations for the
* vfile. This pointer may be left NULL, in which case no
* locking will be applied.
*
* - .ops is a pointer to an @ref regular_ops "operation descriptor".
*
* @param parent A pointer to a virtual directory descriptor; the
* vfile entry will be created into this directory. If NULL, the /proc
* root directory will be used. /proc/rtai is mapped on the
* globally available @a nkvfroot vdir.
*
* @return 0 is returned on success. Otherwise:
*
* - -ENOMEM is returned if the virtual file entry cannot be created
* in the /proc hierarchy.
*/
int xnvfile_init_regular(const char *name,
struct xnvfile_regular *vfile,
struct xnvfile_directory *parent)
{
struct proc_dir_entry *ppde, *pde;
int mode;
if (parent == NULL)
parent = &sysroot;
mode = vfile->ops->store ? 0644 : 0444;
ppde = parent->entry.pde;
pde = proc_create_data(name, mode, ppde, &vfile_regular_fops, vfile);
if (pde == NULL)
return -ENOMEM;
wrap_proc_dir_entry_owner(pde);
vfile->entry.pde = pde;
return 0;
}
EXPORT_SYMBOL_GPL(xnvfile_init_regular);
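/*
 * Illustrative sketch only, not part of the original file: a minimal
 * single-record regular vfile. With no begin()/next() handlers, show()
 * runs exactly once per read with a NULL data pointer, which is enough
 * for a fixed, one-shot dump. The "status" entry name and the demo_hits
 * counter are hypothetical.
 */
static unsigned long demo_hits;

static int demo_status_show(struct xnvfile_regular_iterator *it, void *data)
{
	xnvfile_printf(it, "hits: %lu\n", demo_hits);
	return 0;
}

static struct xnvfile_regular_ops demo_status_ops = {
	.show = demo_status_show,
};

static struct xnvfile_regular demo_status_vfile = {
	.ops = &demo_status_ops,
};

/* From module init, this would create /proc/rtai/status: */
/* err = xnvfile_init_regular("status", &demo_status_vfile, NULL); */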
/**
* @fn int xnvfile_init_dir(const char *name, struct xnvfile_directory *vdir, struct xnvfile_directory *parent)
* @brief Initialize a virtual directory entry.
*
* @param name The name which should appear in the pseudo-filesystem,
* identifying the vdir entry.
*
* @param vdir A pointer to the virtual directory descriptor to
* initialize.
*
* @param parent A pointer to a virtual directory descriptor standing
* for the parent directory of the new vdir. If NULL, the /proc root
* directory will be used. /proc/rtai is mapped on the globally
* available @a nkvfroot vdir.
*
* @return 0 is returned on success. Otherwise:
*
* - -ENOMEM is returned if the virtual directory entry cannot be
* created in the /proc hierarchy.
*/
int xnvfile_init_dir(const char *name,
struct xnvfile_directory *vdir,
struct xnvfile_directory *parent)
{
struct proc_dir_entry *ppde, *pde;
if (parent == NULL)
parent = &sysroot;
ppde = parent->entry.pde;
pde = proc_mkdir(name, ppde);
if (pde == NULL)
return -ENOMEM;
vdir->entry.pde = pde;
vdir->entry.lockops = NULL;
vdir->entry.private = NULL;
wrap_proc_dir_entry_owner(pde);
return 0;
}
EXPORT_SYMBOL_GPL(xnvfile_init_dir);
/**
* @fn int xnvfile_init_link(const char *from, const char *to, struct xnvfile_link *vlink, struct xnvfile_directory *parent)
* @brief Initialize a virtual link entry.
*
* @param from The name which should appear in the pseudo-filesystem,
* identifying the vlink entry.
*
* @param to The target file name which should be referred to
* symbolically by @a name.
*
* @param vlink A pointer to the virtual link descriptor to
* initialize.
*
* @param parent A pointer to a virtual directory descriptor standing
* for the parent directory of the new vlink. If NULL, the /proc root
* directory will be used. /proc/rtai is mapped on the globally
* available @a nkvfroot vdir.
*
* @return 0 is returned on success. Otherwise:
*
* - -ENOMEM is returned if the virtual link entry cannot be created
* in the /proc hierarchy.
*/
int xnvfile_init_link(const char *from,
const char *to,
struct xnvfile_link *vlink,
struct xnvfile_directory *parent)
{
struct proc_dir_entry *ppde, *pde;
if (parent == NULL)
parent = &sysroot;
ppde = parent->entry.pde;
pde = proc_symlink(from, ppde, to);
if (pde == NULL)
return -ENOMEM;
vlink->entry.pde = pde;
vlink->entry.lockops = NULL;
vlink->entry.private = NULL;
wrap_proc_dir_entry_owner(pde);
return 0;
}
EXPORT_SYMBOL_GPL(xnvfile_init_link);
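/*
 * Illustrative sketch only, not part of the original file: creating a
 * subdirectory under /proc/rtai plus a symlink pointing into it. The
 * "drivers" and "serial" names are hypothetical.
 */
static struct xnvfile_directory demo_dir;
static struct xnvfile_link demo_link;

static int demo_vfs_init(void)
{
	int err;

	err = xnvfile_init_dir("drivers", &demo_dir, NULL);	/* /proc/rtai/drivers */
	if (err)
		return err;

	/* /proc/rtai/serial -> drivers/serial (resolved relative to /proc/rtai) */
	err = xnvfile_init_link("serial", "drivers/serial", &demo_link, NULL);
	if (err)
		xnvfile_destroy_dir(&demo_dir);

	return err;
}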
/**
* @fn void xnvfile_destroy(struct xnvfile *vfile)
* @brief Removes a virtual file entry.
*
* @param vfile A pointer to the virtual file descriptor to
* remove.
*/
void xnvfile_destroy(struct xnvfile *vfile)
{
proc_remove(vfile->pde);
}
EXPORT_SYMBOL_GPL(xnvfile_destroy);
/**
* @fn ssize_t xnvfile_get_blob(struct xnvfile_input *input, void *data, size_t size)
* @brief Read in a data bulk written to the vfile.
*
* When writing to a vfile, the associated store() handler from the
* @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
* "regular vfile" is called, with a single argument describing the
* input data. xnvfile_get_blob() retrieves this data as an untyped
* binary blob, and copies it back to the caller's buffer.
*
* @param input A pointer to the input descriptor passed to the
* store() handler.
*
* @param data The address of the destination buffer to copy the input
* data to.
*
* @param size The maximum number of bytes to copy to the destination
* buffer. If the written data is larger than @a size, the input is
* truncated to @a size bytes.
*
* @return The number of bytes read and copied to the destination
* buffer upon success. Otherwise, a negative error code is returned:
*
* - -EFAULT indicates an invalid source buffer address.
*/
ssize_t xnvfile_get_blob(struct xnvfile_input *input,
void *data, size_t size)
{
ssize_t nbytes = input->size;
if (nbytes > size)
nbytes = size;
if (nbytes > 0 && copy_from_user(data, input->u_buf, nbytes))
return -EFAULT;
return nbytes;
}
EXPORT_SYMBOL_GPL(xnvfile_get_blob);
/**
* @fn ssize_t xnvfile_get_string(struct xnvfile_input *input, char *s, size_t maxlen)
* @brief Read in a C-string written to the vfile.
*
* When writing to a vfile, the associated store() handler from the
* @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
* "regular vfile" is called, with a single argument describing the
* input data. xnvfile_get_string() retrieves this data as a
* null-terminated character string, and copies it back to the
* caller's buffer.
*
* @param input A pointer to the input descriptor passed to the
* store() handler.
*
* @param s The address of the destination string buffer to copy the
* input data to.
*
* @param maxlen The maximum number of bytes to copy to the
* destination buffer, including the terminating null character. If
* the written string is longer than @a maxlen, the input is
* truncated to @a maxlen bytes.
*
* @return The number of characters read and copied to the destination
* buffer upon success. Otherwise, a negative error code is returned:
*
* - -EFAULT indicates an invalid source buffer address.
*/
ssize_t xnvfile_get_string(struct xnvfile_input *input,
char *s, size_t maxlen)
{
ssize_t nbytes;
if (maxlen < 1)
return -EINVAL;
nbytes = xnvfile_get_blob(input, s, maxlen - 1);
if (nbytes < 0)
return nbytes;
if (nbytes > 0 && s[nbytes - 1] == '\n')
nbytes--;
s[nbytes] = '\0';
return nbytes;
}
EXPORT_SYMBOL_GPL(xnvfile_get_string);
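/*
 * Illustrative sketch only, not part of the original file: a store()
 * handler using xnvfile_get_string() to accept a short textual command
 * written to the vfile. The "reset" command is hypothetical; demo_hits
 * refers to the regular-vfile sketch shown earlier.
 */
static ssize_t demo_cmd_store(struct xnvfile_input *input)
{
	char cmd[16];
	ssize_t nbytes;

	nbytes = xnvfile_get_string(input, cmd, sizeof(cmd));
	if (nbytes < 0)
		return nbytes;		/* -EFAULT/-EINVAL goes back to the writer */

	if (strcmp(cmd, "reset") != 0)
		return -EINVAL;		/* unknown command */

	demo_hits = 0;
	return nbytes;			/* bytes consumed, reported to write(2) */
}
/* Hooked up via .store = demo_cmd_store in the operation descriptor. */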
/**
* @fn ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp)
* @brief Evaluate the string written to the vfile as a long integer.
*
* When writing to a vfile, the associated store() handler from the
* @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
* "regular vfile" is called, with a single argument describing the
* input data. xnvfile_get_integer() retrieves and interprets this
* data as a long integer, and copies the resulting value back to @a
* valp.
*
* The long integer can be expressed in decimal, octal or hexadecimal
* bases depending on the prefix found.
*
* @param input A pointer to the input descriptor passed to the
* store() handler.
*
* @param valp The address of a long integer variable to receive the
* value.
*
* @return The number of characters read while evaluating the input as
* a long integer upon success. Otherwise, a negative error code is
* returned:
*
* - -EINVAL indicates a parse error on the input stream; the written
* text cannot be evaluated as a long integer.
*
* - -EFAULT indicates an invalid source buffer address.
*/
ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp)
{
char *end, buf[32];
ssize_t nbytes;
long val;
nbytes = xnvfile_get_blob(input, buf, sizeof(buf) - 1);
if (nbytes < 0)
return nbytes;
if (nbytes == 0)
return -EINVAL;
buf[nbytes] = '\0';
val = simple_strtol(buf, &end, 0);
if (*end != '\0' && !isspace(*end))
return -EINVAL;
*valp = val;
return nbytes;
}
EXPORT_SYMBOL_GPL(xnvfile_get_integer);
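/*
 * Illustrative sketch only, not part of the original file: a store()
 * handler using xnvfile_get_integer() to update a tunable. The
 * demo_threshold variable is hypothetical.
 */
static long demo_threshold;

static ssize_t demo_threshold_store(struct xnvfile_input *input)
{
	long val;
	ssize_t nbytes;

	nbytes = xnvfile_get_integer(input, &val);	/* accepts 10, 0x0a, 012, ... */
	if (nbytes < 0)
		return nbytes;

	if (val < 0)
		return -EINVAL;		/* reject out-of-range settings */

	demo_threshold = val;
	return nbytes;
}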
int __vfile_hostlock_get(struct xnvfile *vfile)
{
struct xnvfile_hostlock_class *lc;
lc = container_of(vfile->lockops, struct xnvfile_hostlock_class, ops);
return down_interruptible(&lc->sem) ? -ERESTARTSYS : 0;
}
EXPORT_SYMBOL_GPL(__vfile_hostlock_get);
void __vfile_hostlock_put(struct xnvfile *vfile)
{
struct xnvfile_hostlock_class *lc;
lc = container_of(vfile->lockops, struct xnvfile_hostlock_class, ops);
up(&lc->sem);
}
EXPORT_SYMBOL_GPL(__vfile_hostlock_put);
static int __vfile_nklock_get(struct xnvfile *vfile)
{
struct xnvfile_nklock_class *lc;
lc = container_of(vfile->lockops, struct xnvfile_nklock_class, ops);
xnlock_get_irqsave(&nklock, lc->s);
return 0;
}
static void __vfile_nklock_put(struct xnvfile *vfile)
{
struct xnvfile_nklock_class *lc;
lc = container_of(vfile->lockops, struct xnvfile_nklock_class, ops);
xnlock_put_irqrestore(&nklock, lc->s);
}
struct xnvfile_nklock_class xnvfile_nucleus_lock = {
.ops = {
.get = __vfile_nklock_get,
.put = __vfile_nklock_put,
},
};
int __init xnvfile_init_root(void)
{
struct xnvfile_directory *vdir = &nkvfroot;
struct proc_dir_entry *pde;
pde = proc_mkdir("rtai", NULL);
if (pde == NULL)
return -ENOMEM;
vdir->entry.pde = pde;
vdir->entry.lockops = NULL;
vdir->entry.private = NULL;
wrap_proc_dir_entry_owner(pde);
return 0;
}
void xnvfile_destroy_root(void)
{
nkvfroot.entry.pde = NULL;
remove_proc_entry("rtai", NULL);
}
/*@}*/

View file

@ -1,700 +0,0 @@
/**
* @file
* This file is part of the Xenomai project.
*
* @note Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>
*
* adapted to RTAI by Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* @ingroup vfile
*/
#ifndef RTAI_RTDM_VFILE_H
#define RTAI_RTDM_VFILE_H
#ifdef CONFIG_PROC_FS
/** @addtogroup vfile
*@{*/
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <rtdm/xn.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
#define PDE_DATA(inode) PDE(inode)->data
static inline void proc_remove(struct proc_dir_entry *pde)
{
remove_proc_entry(pde->name, pde->parent);
}
#endif
#define wrap_f_inode(file) ((file)->f_path.dentry->d_inode)
#define wrap_proc_dir_entry_owner(entry) do { (void)entry; } while(0)
struct xnvfile_directory;
struct xnvfile_regular_iterator;
struct xnvfile_snapshot_iterator;
struct xnvfile_lock_ops;
struct xnvfile {
struct proc_dir_entry *pde;
struct file *file;
struct xnvfile_lock_ops *lockops;
int refcnt;
void *private;
};
/**
* @brief Vfile locking operations
* @anchor vfile_lockops
*
* This structure describes the operations to be provided for
* implementing locking support on vfiles. They apply to both
* snapshot-driven and regular vfiles.
*/
struct xnvfile_lock_ops {
/**
* @anchor lockops_get
* This handler should grab the desired lock.
*
* @param vfile A pointer to the virtual file which needs
* locking.
*
* @return zero should be returned if the call
* succeeds. Otherwise, a negative error code can be returned;
* upon error, the current vfile operation is aborted, and the
* user-space caller is passed back the error value.
*/
int (*get)(struct xnvfile *vfile);
/**
* @anchor lockops_put This handler should release the lock
* previously grabbed by the @ref lockops_get "get() handler".
*
* @param vfile A pointer to the virtual file which currently
* holds the lock to release.
*/
void (*put)(struct xnvfile *vfile);
};
/*
* XXX: struct semaphore is legacy for mutual exclusion, but supported
* on both 2.4 and 2.6 kernels. Will be changed to mutex when 2.4
* support is dropped from RTAI.
*/
struct xnvfile_hostlock_class {
struct xnvfile_lock_ops ops;
struct semaphore sem;
};
struct xnvfile_nklock_class {
struct xnvfile_lock_ops ops;
spl_t s;
};
struct xnvfile_input {
const char __user *u_buf;
size_t size;
struct xnvfile *vfile;
};
/**
* @brief Regular vfile operation descriptor
* @anchor regular_ops
*
* This structure describes the operations available with a regular
* vfile. It defines handlers for sending back formatted kernel data
* upon a user-space read request, and for obtaining user data upon a
* user-space write request.
*/
struct xnvfile_regular_ops {
/**
* @anchor regular_rewind This handler is called only once,
* when the virtual file is opened, before the @ref
* regular_begin "begin() handler" is invoked.
*
* @param it A pointer to the vfile iterator which will be
* used to read the file contents.
*
* @return Zero should be returned upon success. Otherwise, a
* negative error code aborts the operation, and is passed
* back to the reader.
*
* @note This handler is optional. It should not be used to
* allocate resources but rather to perform consistency
* checks, since no closure call is issued in case the open
* sequence eventually fails.
*/
int (*rewind)(struct xnvfile_regular_iterator *it);
/**
* @anchor regular_begin
* This handler should prepare for iterating over the records
* upon a read request, starting from the specified position.
*
* @param it A pointer to the current vfile iterator. On
* entry, it->pos is set to the (0-based) position of the
* first record to output. This handler may be called multiple
* times with different position requests.
*
* @return A pointer to the first record to format and output,
* to be passed to the @ref regular_show "show() handler" as
* its @a data parameter, if the call succeeds. Otherwise:
*
* - NULL in case no record is available, in which case the
* read operation will terminate immediately with no output.
*
* - VFILE_SEQ_START, a special value indicating that @ref
* regular_show "the show() handler" should receive a NULL
* data pointer first, in order to output a header.
*
* - ERR_PTR(errno), where errno is a negative error code;
* upon error, the current operation will be aborted
* immediately.
*
* @note This handler is optional; if none is given in the
* operation descriptor (i.e. NULL value), the @ref
* regular_show "show() handler()" will be called only once
* for a read operation, with a NULL @a data parameter. This
* particular setting is convenient for simple regular vfiles
* having a single, fixed record to output.
*/
void *(*begin)(struct xnvfile_regular_iterator *it);
/**
* @anchor regular_next
* This handler should return the address of the next record
* to format and output by the @ref regular_show "show()
* handler".
*
* @param it A pointer to the current vfile iterator. On
* entry, it->pos is set to the (0-based) position of the
* next record to output.
*
* @return A pointer to the next record to format and output,
* to be passed to the @ref regular_show "show() handler" as
* its @a data parameter, if the call succeeds. Otherwise:
*
* - NULL in case no record is available, in which case the
* read operation will terminate immediately with no output.
*
* - ERR_PTR(errno), where errno is a negative error code;
* upon error, the current operation will be aborted
* immediately.
*
* @note This handler is optional; if none is given in the
* operation descriptor (i.e. NULL value), the read operation
* will stop after the first invocation of the @ref regular_show
* "show() handler".
*/
void *(*next)(struct xnvfile_regular_iterator *it);
/**
* @anchor regular_end
* This handler is called after all records have been output.
*
* @param it A pointer to the current vfile iterator.
*
* @note This handler is optional and the pointer may be NULL.
*/
void (*end)(struct xnvfile_regular_iterator *it);
/**
* @anchor regular_show
* This handler should format and output a record.
*
* xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
* xnvfile_putc() are available to format and/or emit the
* output. All routines take the iterator argument @a it as
* their first parameter.
*
* @param it A pointer to the current vfile iterator.
*
* @param data A pointer to the record to format then
* output. The first call to the handler may receive a NULL @a
* data pointer, depending on the presence and/or return value of
* the @ref regular_begin "begin() handler"; the show handler should test
* this special value to output any header that fits, prior to
* receiving more calls with actual records.
*
* @return zero if the call succeeds, also indicating that the
* handler should be called for the next record if
* any. Otherwise:
*
* - A negative error code. This will abort the output phase,
* and return this status to the reader.
*
* - VFILE_SEQ_SKIP, a special value indicating that the
* current record should be skipped and will not be output.
*/
int (*show)(struct xnvfile_regular_iterator *it, void *data);
/**
* @anchor regular_store
* This handler receives data written to the vfile, likely for
* updating some kernel setting, or triggering any other
* action which fits. This is the only handler which deals
* with the write-side of a vfile. It is called when writing
* to the /proc entry of the vfile from a user-space process.
*
* The input data is described by a descriptor passed to the
* handler, which may be subsequently passed to parsing helper
* routines. For instance, xnvfile_get_string() will accept
* the input descriptor for returning the written data as a
* null-terminated character string. On the other hand,
* xnvfile_get_integer() will attempt to return a long integer
* from the input data.
*
* @param input A pointer to an input descriptor. It refers to
* opaque data from the handler's standpoint.
*
* @return the number of bytes read from the input descriptor
* if the call succeeds. Otherwise, a negative error code.
* Return values from parsing helper routines are commonly
* passed back to the caller by the @ref regular_store
* "store() handler".
*
* @note This handler is optional, and may be omitted for
* read-only vfiles.
*/
ssize_t (*store)(struct xnvfile_input *input);
};
struct xnvfile_regular {
struct xnvfile entry;
size_t privsz;
struct xnvfile_regular_ops *ops;
};
struct xnvfile_regular_template {
size_t privsz;
struct xnvfile_regular_ops *ops;
struct xnvfile_lock_ops *lockops;
};
/**
* @brief Regular vfile iterator
* @anchor regular_iterator
*
* This structure defines an iterator over a regular vfile.
*/
struct xnvfile_regular_iterator {
/** Current record position while iterating. */
loff_t pos;
/** Backlink to the host sequential file supporting the vfile. */
struct seq_file *seq;
/** Backlink to the vfile being read. */
struct xnvfile_regular *vfile;
/**
* Start of private area. Use xnvfile_iterator_priv() to
* address it.
*/
char private[0];
};
/**
* @brief Snapshot vfile operation descriptor
* @anchor snapshot_ops
*
* This structure describes the operations available with a
* snapshot-driven vfile. It defines handlers for returning a
* printable snapshot of some RTAI object contents upon a
* user-space read request, and for updating this object upon a
* user-space write request.
*/
struct xnvfile_snapshot_ops {
/**
* @anchor snapshot_rewind
* This handler (re-)initializes the data collection, moving
* the seek pointer at the first record. When the file
* revision tag is touched while collecting data, the current
* reading is aborted, all collected data dropped, and the
* vfile is eventually rewound.
*
* @param it A pointer to the current snapshot iterator. Two
* useful pieces of information can be retrieved from this
* iterator in this context:
*
* - it->vfile is a pointer to the descriptor of the virtual
* file being rewound.
*
* - xnvfile_iterator_priv(it) returns a pointer to the
* private data area, available from the descriptor, whose
* size is vfile->privsz. If the latter size is zero, the
* returned pointer is meaningless and should not be used.
*
* @return A negative error code aborts the data collection,
* and is passed back to the reader. Otherwise:
*
* - a strictly positive value is interpreted as the total
* number of records which will be returned by the @ref
* snapshot_next "next() handler" during the data collection
* phase. If no @ref snapshot_begin "begin() handler" is
* provided in the @ref snapshot_ops "operation descriptor",
* this value is used to allocate the snapshot buffer
* internally. The size of this buffer would then be
* vfile->datasz * value.
*
* - zero leaves the allocation to the @ref snapshot_begin
* "begin() handler" if present, or indicates that no record
* is to be output in case such handler is not given.
*
* @note This handler is optional; a NULL value indicates that
* nothing needs to be done for rewinding the vfile. It is
* called with the vfile lock held.
*/
int (*rewind)(struct xnvfile_snapshot_iterator *it);
/**
* @anchor snapshot_begin
* This handler should allocate the snapshot buffer to hold
* records during the data collection phase. When specified,
* all records collected via the @ref snapshot_next "next()
* handler" will be written to a cell from the memory area
* returned by begin().
*
* @param it A pointer to the current snapshot iterator.
*
* @return A pointer to the record buffer, if the call
* succeeds. Otherwise:
*
* - NULL in case of allocation error. This will abort the data
* collection, and return -ENOMEM to the reader.
*
* - VFILE_SEQ_EMPTY, a special value indicating that no
* record will be output. In such a case, the @ref
* snapshot_next "next() handler" will not be called, and the
* data collection will stop immediately. However, the @ref
* snapshot_show "show() handler" will still be called once,
* with a NULL data pointer (i.e. header display request).
*
* @note This handler is optional; if none is given, the buffer
* is allocated internally, sized according to the value returned
* by the @ref snapshot_rewind "rewind() handler".
*/
void *(*begin)(struct xnvfile_snapshot_iterator *it);
/**
* @anchor snapshot_end
* This handler releases the memory buffer previously obtained
* from begin(). It is usually called after the snapshot data
* has been output by show(), but it may also be called before
* rewinding the vfile after a revision change, to release the
* dropped buffer.
*
* @param it A pointer to the current snapshot iterator.
*
* @param buf A pointer to the buffer to release.
*
* @note This routine is optional and the pointer may be
* NULL. It is not needed upon internal buffer allocation;
* see the description of the @ref snapshot_rewind "rewind()
* handler".
*/
void (*end)(struct xnvfile_snapshot_iterator *it, void *buf);
/**
* @anchor snapshot_next
* This handler fetches the next record, as part of the
* snapshot data to be sent back to the reader via the
* show().
*
* @param it A pointer to the current snapshot iterator.
*
* @param data A pointer to the record to fill in.
*
* @return a strictly positive value, if the call succeeds and
* leaves a valid record in @a data, which should be passed
* to the @ref snapshot_show "show() handler" during the
* formatting and output phase. Otherwise:
*
* - A negative error code. This will abort the data
* collection, and return this status to the reader.
*
* - VFILE_SEQ_SKIP, a special value indicating that the
* current record should be skipped. In such a case, the @a
* data pointer is not advanced to the next position before
* the @ref snapshot_next "next() handler" is called anew.
*
* @note This handler is called with the vfile lock
* held. Before each invocation of this handler, the vfile
* core checks whether the revision tag has been touched, in
* which case the data collection is restarted from scratch. A
* data collection phase succeeds whenever all records can be
* fetched via the @ref snapshot_next "next() handler", while
* the revision tag remains unchanged, which indicates that a
* consistent snapshot of the object state was taken.
*/
int (*next)(struct xnvfile_snapshot_iterator *it, void *data);
/**
* @anchor snapshot_show
* This handler should format and output a record from the
* collected data.
*
* xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
* xnvfile_putc() are available to format and/or emit the
* output. All routines take the iterator argument @a it as
* their first parameter.
*
* @param it A pointer to the current snapshot iterator.
*
* @param data A pointer to the record to format then
* output. The first call to the handler is always passed a
* NULL @a data pointer; the show handler should test this
* special value to output any header that fits, prior to
* receiving more calls with actual records.
*
* @return zero if the call succeeds, also indicating that the
* handler should be called for the next record if
* any. Otherwise:
*
* - A negative error code. This will abort the output phase,
* and return this status to the reader.
*
* - VFILE_SEQ_SKIP, a special value indicating that the
* current record should be skipped and will not be output.
*/
int (*show)(struct xnvfile_snapshot_iterator *it, void *data);
/**
* @anchor snapshot_store
* This handler receives data written to the vfile, likely for
* updating the associated RTAI object's state, or
* triggering any other action which fits. This is the only
* handler which deals with the write-side of a vfile. It is
* called when writing to the /proc entry of the vfile
* from a user-space process.
*
* The input data is described by a descriptor passed to the
* handler, which may be subsequently passed to parsing helper
* routines. For instance, xnvfile_get_string() will accept
* the input descriptor for returning the written data as a
* null-terminated character string. On the other hand,
* xnvfile_get_integer() will attempt to return a long integer
* from the input data.
*
* @param input A pointer to an input descriptor. It refers to
* opaque data from the handler's standpoint.
*
* @return the number of bytes read from the input descriptor
* if the call succeeds. Otherwise, a negative error code.
* Return values from parsing helper routines are commonly
* passed back to the caller by the @ref snapshot_store
* "store() handler".
*
* @note This handler is optional, and may be omitted for
* read-only vfiles.
*/
ssize_t (*store)(struct xnvfile_input *input);
};
/**
* @brief Snapshot revision tag
* @anchor revision_tag
*
* This structure defines a revision tag to be used with @ref
* snapshot_vfile "snapshot-driven vfiles".
*/
struct xnvfile_rev_tag {
/** Current revision number. */
int rev;
};
struct xnvfile_snapshot_template {
size_t privsz;
size_t datasz;
struct xnvfile_rev_tag *tag;
struct xnvfile_snapshot_ops *ops;
struct xnvfile_lock_ops *lockops;
};
/**
* @brief Snapshot vfile descriptor
* @anchor snapshot_vfile
*
* This structure describes a snapshot-driven vfile. Reading from
* such a vfile involves a preliminary data collection phase under
* lock protection, and a subsequent formatting and output phase of
* the collected data records. Locking is done in a way that does not
* increase worst-case latency, regardless of the number of records to
* be collected for output.
*/
struct xnvfile_snapshot {
struct xnvfile entry;
size_t privsz;
size_t datasz;
struct xnvfile_rev_tag *tag;
struct xnvfile_snapshot_ops *ops;
};
/**
* @brief Snapshot-driven vfile iterator
* @anchor snapshot_iterator
*
* This structure defines an iterator over a snapshot-driven vfile.
*/
struct xnvfile_snapshot_iterator {
/** Number of collected records. */
int nrdata;
/** Address of record buffer. */
caddr_t databuf;
/** Backlink to the host sequential file supporting the vfile. */
struct seq_file *seq;
/** Backlink to the vfile being read. */
struct xnvfile_snapshot *vfile;
/** Buffer release handler. */
void (*endfn)(struct xnvfile_snapshot_iterator *it, void *buf);
/**
* Start of private area. Use xnvfile_iterator_priv() to
* address it.
*/
char private[0];
};
struct xnvfile_directory {
struct xnvfile entry;
};
struct xnvfile_link {
struct xnvfile entry;
};
/* vfile.begin()=> */
#define VFILE_SEQ_EMPTY ((void *)-1)
/* =>vfile.show() */
#define VFILE_SEQ_START SEQ_START_TOKEN
/* vfile.next/show()=> */
#define VFILE_SEQ_SKIP 2
#define xnvfile_printf(it, args...) seq_printf((it)->seq, ##args)
#define xnvfile_write(it, data, len) seq_write((it)->seq, (data),(len))
#define xnvfile_puts(it, s) seq_puts((it)->seq, (s))
#define xnvfile_putc(it, c) seq_putc((it)->seq, (c))
static inline void xnvfile_touch_tag(struct xnvfile_rev_tag *tag)
{
tag->rev++;
}
static inline void xnvfile_touch(struct xnvfile_snapshot *vfile)
{
xnvfile_touch_tag(vfile->tag);
}
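/*
 * Illustrative sketch only, not part of the original header: whatever
 * updates data exported through a snapshot vfile should also bump the
 * revision tag, so that a reader caught mid-collection drops its data
 * and restarts. The demo_stats_* names are hypothetical and assumed to
 * be defined by the module owning the vfile.
 */
extern struct xnvfile_snapshot demo_stats_vfile;	/* assumed registered elsewhere */
extern unsigned long demo_stats_overruns;		/* assumed exported data */

static inline void demo_note_overrun(void)
{
	/* Update under the same lock that guards the data collection... */
	demo_stats_overruns++;
	/* ...then invalidate any snapshot currently being taken. */
	xnvfile_touch(&demo_stats_vfile);
}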
#define xnvfile_noentry \
{ \
.pde = NULL, \
.private = NULL, \
.file = NULL, \
.refcnt = 0, \
}
#define xnvfile_nodir { .entry = xnvfile_noentry }
#define xnvfile_nolink { .entry = xnvfile_noentry }
#define xnvfile_nofile { .entry = xnvfile_noentry }
#define xnvfile_priv(e) ((e)->entry.private)
#define xnvfile_nref(e) ((e)->entry.refcnt)
#define xnvfile_file(e) ((e)->entry.file)
#define xnvfile_iterator_priv(it) ((void *)(&(it)->private))
extern struct xnvfile_nklock_class xnvfile_nucleus_lock;
extern struct xnvfile_directory nkvfroot;
int xnvfile_init_root(void);
void xnvfile_destroy_root(void);
#ifdef __cplusplus
extern "C" {
#endif
int xnvfile_init_snapshot(const char *name,
struct xnvfile_snapshot *vfile,
struct xnvfile_directory *parent);
int xnvfile_init_regular(const char *name,
struct xnvfile_regular *vfile,
struct xnvfile_directory *parent);
int xnvfile_init_dir(const char *name,
struct xnvfile_directory *vdir,
struct xnvfile_directory *parent);
int xnvfile_init_link(const char *from,
const char *to,
struct xnvfile_link *vlink,
struct xnvfile_directory *parent);
void xnvfile_destroy(struct xnvfile *vfile);
ssize_t xnvfile_get_blob(struct xnvfile_input *input,
void *data, size_t size);
ssize_t xnvfile_get_string(struct xnvfile_input *input,
char *s, size_t maxlen);
ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp);
int __vfile_hostlock_get(struct xnvfile *vfile);
void __vfile_hostlock_put(struct xnvfile *vfile);
#ifdef __cplusplus
}
#endif
static inline
void xnvfile_destroy_snapshot(struct xnvfile_snapshot *vfile)
{
xnvfile_destroy(&vfile->entry);
}
static inline
void xnvfile_destroy_regular(struct xnvfile_regular *vfile)
{
xnvfile_destroy(&vfile->entry);
}
static inline
void xnvfile_destroy_dir(struct xnvfile_directory *vdir)
{
xnvfile_destroy(&vdir->entry);
}
static inline
void xnvfile_destroy_link(struct xnvfile_link *vlink)
{
xnvfile_destroy(&vlink->entry);
}
#define DEFINE_VFILE_HOSTLOCK(name) \
struct xnvfile_hostlock_class name = { \
.ops = { \
.get = __vfile_hostlock_get, \
.put = __vfile_hostlock_put, \
}, \
.sem = __SEMAPHORE_INITIALIZER(name.sem, 1), \
}
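/*
 * Illustrative sketch only, not part of the original header: vfiles
 * whose collection handlers may sleep can use a semaphore-based host
 * lock instead of the default nucleus lock. The demo_list names are
 * hypothetical; the snapshot descriptor is assumed to be filled in as
 * in the earlier examples.
 */
static DEFINE_VFILE_HOSTLOCK(demo_list_lock);

static inline int demo_list_vfile_register(struct xnvfile_snapshot *vfile)
{
	/* Must be set before xnvfile_init_snapshot() registers the entry. */
	vfile->entry.lockops = &demo_list_lock.ops;
	return xnvfile_init_snapshot("demo_list", vfile, NULL);
}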
#else /* !CONFIG_PROC_FS */
#define xnvfile_touch_tag(tag) do { } while (0)
#define xnvfile_touch(vfile) do { } while (0)
#endif /* !CONFIG_PROC_FS */
/*@}*/
#endif /* !RTAI_RTDM_VFILE_H */

View file

@ -34,29 +34,15 @@ endmenu
menu "Machine (x86 / x86_64)" menu "Machine (x86 / x86_64)"
config RTAI_FPU_SUPPORT
bool "Enable FPU support"
default y
help
The FPU executes instructions from the processor's normal
instruction stream. It can handle the types of high-precision
floating-point processing operations commonly found in
scientific, engineering, and business applications. Enabling
FPU support on a platform providing this hardware component
may greatly improve performance. More information about the
Floating-Point Unit on x86 platforms is available at the
following URL:
http://www.intel.com/design/intarch/techinfo/Pentium/fpu.htm
config RTAI_CPUS config RTAI_CPUS
string "Number of CPUs (SMP-only)" string "Number of CPUs (SMP-only)"
default 2 default 4
help help
RTAI has native support for Symmetrical Multi-Processing RTAI has native support for Symmetrical Multi-Processing
machines. If it is your case, you may want to enter here the machines. If it is your case, you may want to enter here the
number of CPUs of your motherboard. number of CPUs of your motherboard.
PAY ATTENTION: the default value is 2. PAY ATTENTION: the default value is 4.
config RTAI_DIAG_TSC_SYNC config RTAI_DIAG_TSC_SYNC
bool "Diagnose out of sync MP-TSCs" bool "Diagnose out of sync MP-TSCs"
@ -207,7 +193,7 @@ config RTAI_USER_BUSY_ALIGN_RET_DELAY
config RTAI_SCHED_LXRT_NUMSLOTS config RTAI_SCHED_LXRT_NUMSLOTS
string "Number of registrable RTAI objects" string "Number of registrable RTAI objects"
default 256 default 1024
help help
The maximum number of registrable objects in RTAI. The maximum number of registrable objects in RTAI.
@ -264,7 +250,7 @@ config RTAI_FULL_PRINHER
config RTAI_ALIGN_LINUX_PRIORITY config RTAI_ALIGN_LINUX_PRIORITY
bool "Keep Linux task priority aligned to RTAI" bool "Keep Linux task priority aligned to RTAI"
default n default y
help help
By enabling this option the RTAI scheduler will keep Linux tasks By enabling this option the RTAI scheduler will keep Linux tasks
@ -275,7 +261,7 @@ config RTAI_ALIGN_LINUX_PRIORITY
config RTAI_ONE_SHOT config RTAI_ONE_SHOT
bool "One-shot timer mode" bool "One-shot timer mode"
default n default y
help help
Set to enable one-shot timer mode as the default. If not set, the Set to enable one-shot timer mode as the default. If not set, the
hard timer will run in periodic mode according to the period used hard timer will run in periodic mode according to the period used
@ -313,22 +299,6 @@ endmenu
menu "Supported services" menu "Supported services"
config RTAI_BITS
tristate "Event flags"
default m
help
Event flags are used to synchronize a task to the occurrence of
multiple events. RTAI uses the term "bits" to stress the fact that
events are just logical objects, i.e. a kind of digital I/O,
nothing else being associated with them, e.g. a count.
So any synchronization based on them may be disjunctive, when any
of the events have occurred, or conjunctive, when all events have
occurred. The former corresponds to a logical OR whereas the latter
corresponds to a logical AND. Their use is similar to semaphores,
except that signals/waits are not related to just a simple counter
but depend on the combination of a set of bits.
The module will be called rtai_bits.o.
config RTAI_FIFOS config RTAI_FIFOS
tristate "Fifo" tristate "Fifo"
default m default m
@ -463,28 +433,6 @@ config RTAI_MBX
Mailboxes depend on semaphores. Mailboxes depend on semaphores.
The module will be called rtai_mbx.o. The module will be called rtai_mbx.o.
config RTAI_TBX
tristate "RTAI message queues and typed mailboxes"
depends on RTAI_SEM
default y if RTAI_SEM=y
default m if RTAI_SEM=m
help
RTAI message queues (msgq) are intertask processor messages that allow
exchanging prioritised messages of any size. Broadcasting of messages
to all the waiting tasks is also possible.
Legacy typed mailbox (TBX) services are recovered by using RTAI msgqs
and provide a pre-canned example of their use, offering:
1 - message broadcasting, allowing a message to be sent to all the
tasks that are pending on the same TBX;
2 - urgent sending of messages: these messages are not enqueued, but
inserted at the head of the queue, bypassing all the other
messages already present in the TBX;
3 - a priority or fifo wakeup policy that may be set at runtime when
creating the typed mailbox.
Typed mailboxes depend on semaphores.
The module will be called rtai_tbx.o.
config RTAI_TASKLETS config RTAI_TASKLETS
tristate "Tasklets" tristate "Tasklets"
default m default m
@ -577,20 +525,6 @@ config RTAI_USE_NEWERR
to choose what to do, without forcing any adaption for already to choose what to do, without forcing any adaption for already
existing working applications. existing working applications.
config RTAI_MATH
tristate "Mathfuns support in kernel"
depends on RTAI_FPU_SUPPORT
default y
help
This parameter allows building a module containing math support
functions for kernel space. It might be useful to avoid fighting
for the use of libc within the kernel.
config RTAI_MATH_C99
bool "C99 standard support"
depends on RTAI_MATH
default y
config RTAI_MALLOC config RTAI_MALLOC
tristate "Real-time malloc support" tristate "Real-time malloc support"
default y default y
@ -687,47 +621,3 @@ config RTAI_HARD_SOFT_TOGGLER
endmenu endmenu
endmenu endmenu
menu "Add-ons"
config RTAI_RTDM
bool "Real-Time Driver Model over RTAI"
default n
help
Real-Time Driver Model specific implementation for RTAI.
config RTAI_RTDM_FD_MAX
depends on RTAI_RTDM
string "Number of RTDM file descriptors"
default 512
help
The maximum number of file descriptors in RTDM.
config RTAI_RTDM_SELECT
depends on RTAI_RTDM
bool "Enable select multiplexing for RTDM services"
default n
help
By enabling this option, select(2) support can be used to monitor
access to multiple RTDM services at once, as with the standard UNIX
support, with the exception of the timeout argument format: being
RTDM specific, nanoseconds are used directly in place of
timeval/timespec.
config RTAI_RTDM_SHIRQ
depends on RTAI_RTDM
bool "Shared interrupts"
default n
help
Make it possible for RTDM's own interrupt handlers to manage shared
interrupts.
config RTAI_DEBUG_RTDM
depends on RTAI_RTDM
bool "Enable some elementary RTDM debugging messages"
default n
help
Enable some elementary debugging of wrong requests to RTDM, in the
form of messages asserting what's improper and where it's happening.
endmenu

View file

@ -3,8 +3,6 @@ moduledir = @RTAI_MODULE_DIR@
modext = @RTAI_MODULE_EXT@ modext = @RTAI_MODULE_EXT@
libcalibrate_rt_a_SOURCES = calibrate-module.c calibrate.h libcalibrate_rt_a_SOURCES = calibrate-module.c calibrate.h
libsmi_rt_a_SOURCES = smi-module.c libsmi_rt_a_SOURCES = smi-module.c
@ -57,7 +55,7 @@ calibration_PROGRAMS += calibration_helper
calibrate_SOURCES = calibrate.c calibrate.h calibrate_SOURCES = calibrate.c calibrate.h
calibrate_CPPFLAGS = \ calibrate_CPPFLAGS = \
@RTAI_REAL_USER_CFLAGS@ \ @RTAI_USER_CFLAGS@ \
-DKERNEL_HELPER_PATH="\"$(calibrationdir)/rtai_calibrate$(modext)\"" \ -DKERNEL_HELPER_PATH="\"$(calibrationdir)/rtai_calibrate$(modext)\"" \
-DUSER_HELPER_PATH="\"$(calibrationdir)/calibration_helper\"" \ -DUSER_HELPER_PATH="\"$(calibrationdir)/calibration_helper\"" \
-I$(top_srcdir)/base/include \ -I$(top_srcdir)/base/include \
@ -66,7 +64,7 @@ calibrate_CPPFLAGS = \
calibration_helper_SOURCES = calibration_helper.c calibration_helper_SOURCES = calibration_helper.c
calibration_helper_CPPFLAGS = \ calibration_helper_CPPFLAGS = \
@RTAI_REAL_USER_CFLAGS@ \ @RTAI_USER_CFLAGS@ \
-DHAL_SCHED_PATH="\"$(DESTDIR)$(moduledir)\"" \ -DHAL_SCHED_PATH="\"$(DESTDIR)$(moduledir)\"" \
-DHAL_SCHED_MODEXT="\"$(modext)\"" \ -DHAL_SCHED_MODEXT="\"$(modext)\"" \
-I$(top_srcdir)/base/include \ -I$(top_srcdir)/base/include \

View file

@ -27,7 +27,7 @@
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
#define COUNT 0xFFFFFFFFU #define COUNT 0xFFFFFFFFU
static struct params_t params = { 0, SETUP_TIME_8254, LATENCY_8254, 0, LATENCY_APIC, SETUP_TIME_APIC, CALIBRATED_APIC_FREQ, 0, CALIBRATED_CPU_FREQ, CLOCK_TICK_RATE, LATCH }; static struct params_t params = { 0, SETUP_TIME_8254, LATENCY_8254, 0, LATENCY_APIC, SETUP_TIME_APIC, CALIBRATED_APIC_FREQ, 0, CALIBRATED_CPU_FREQ, CLOCK_TICK_RATE, LATCH };
@ -59,7 +59,7 @@ static void calibrate(void)
rt_pend_linux_irq(TIMER_8254_IRQ); rt_pend_linux_irq(TIMER_8254_IRQ);
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
if (params.mp) { if (params.mp) {
unsigned temp = (apic_read(APIC_ICR) & (~0xCDFFF)) | (APIC_DM_FIXED | APIC_DEST_ALLINC | LOCAL_TIMER_VECTOR); unsigned temp = (apic_read(APIC_ICR) & (~0xCDFFF)) | (APIC_DM_FIXED | APIC_DEST_ALLINC | LOCAL_TIMER_VECTOR);
apic_write(APIC_ICR, temp); apic_write(APIC_ICR, temp);
} }
#endif /* CONFIG_X86_LOCAL_APIC */ #endif /* CONFIG_X86_LOCAL_APIC */
@ -67,7 +67,7 @@ static void calibrate(void)
static void just_ret(void) static void just_ret(void)
{ {
return; return;
} }
static RT_TASK rtask; static RT_TASK rtask;
@ -168,9 +168,9 @@ static long long user_srq(unsigned long whatever)
} }
case END_FREQ_CAL: { case END_FREQ_CAL: {
rt_free_timer(); rt_free_timer();
rt_reset_irq_to_sym_mode(TIMER_8254_IRQ); rt_reset_irq_to_sym_mode(TIMER_8254_IRQ);
rt_free_global_irq(TIMER_8254_IRQ); rt_free_global_irq(TIMER_8254_IRQ);
break; break;
} }
@ -186,8 +186,8 @@ static long long user_srq(unsigned long whatever)
} }
case END_BUS_CHECK: { case END_BUS_CHECK: {
rt_free_timer(); rt_free_timer();
rt_reset_irq_to_sym_mode(TIMER_8254_IRQ); rt_reset_irq_to_sym_mode(TIMER_8254_IRQ);
break; break;
} }
case GET_PARAMS: { case GET_PARAMS: {
@ -203,14 +203,14 @@ static int srq;
int init_module(void) int init_module(void)
{ {
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
params.mp = 1; params.mp = 1;
#endif /* CONFIG_X86_LOCAL_APIC */ #endif /* CONFIG_X86_LOCAL_APIC */
params.freq_apic = RTAI_FREQ_APIC; params.freq_apic = RTAI_FREQ_APIC;
params.cpu_freq = RTAI_CPU_FREQ; params.cpu_freq = RTAI_CPU_FREQ;
rtf_create(0, FIFOBUFSIZE); rtf_create(0, FIFOBUFSIZE);
if ((srq = rt_request_srq(CALSRQ, (void *)user_srq, user_srq)) < 0) { if ((srq = rt_request_srq(CALSRQ, (void *)user_srq, user_srq)) < 0) {
printk("No sysrq available for the calibration.\n"); printk("No sysrq available for the calibration.\n");
return srq; return srq;
} }
return 0; return 0;
} }

View file

@ -52,12 +52,12 @@ struct option options[] = {
{ "user", 0, 0, 'u' }, { "user", 0, 0, 'u' },
{ "period", 1, 0, 'p' }, { "period", 1, 0, 'p' },
{ "time", 1, 0, 't' }, { "time", 1, 0, 't' },
{ "cpu", 0, 0, 'c' }, { "cpu", 0, 0, 'c' },
{ "apic", 0, 0, 'a' }, { "apic", 0, 0, 'a' },
{ "both", 0, 0, 'b' }, { "both", 0, 0, 'b' },
{ "scope", 1, 0, 's' }, { "scope", 1, 0, 's' },
{ "interrupt", 0, 0, 'i' }, { "interrupt", 0, 0, 'i' },
{ NULL, 0, 0, 0 } { NULL, 0, 0, 0 }
}; };
void print_usage(void) void print_usage(void)
@ -194,8 +194,8 @@ int main(int argc, char *argv[])
rtai_srq(srq, (unsigned long)args); rtai_srq(srq, (unsigned long)args);
read(fifo, &average, sizeof(average)); read(fifo, &average, sizeof(average));
average /= (int)args[2]; average /= (int)args[2];
if (params.mp) { if (params.mp) {
printf("\n*** '#define LATENCY_APIC %d' (IN USE %lu)\n\n", (int)params.latency_apic + average, params.latency_apic); printf("\n*** '#define LATENCY_APIC %d' (IN USE %lu)\n\n", (int)params.latency_apic + average, params.latency_apic);
} else { } else {
printf("\n*** '#define LATENCY_8254 %d' (IN USE %lu)\n\n", (int)params.latency_8254 + average, params.latency_8254); printf("\n*** '#define LATENCY_8254 %d' (IN USE %lu)\n\n", (int)params.latency_8254 + average, params.latency_8254);
} }
@ -226,8 +226,8 @@ int main(int argc, char *argv[])
// exit(1); // exit(1);
} }
average /= (int)args[2]; average /= (int)args[2];
if (params.mp) { if (params.mp) {
printf("\n*** '#define LATENCY_APIC %d' (IN USE %lu)\n\n", (int)params.latency_apic + average, params.latency_apic); printf("\n*** '#define LATENCY_APIC %d' (IN USE %lu)\n\n", (int)params.latency_apic + average, params.latency_apic);
} else { } else {
printf("\n*** '#define LATENCY_8254 %d' (IN USE %lu)\n\n", (int)params.latency_8254 + average, params.latency_8254); printf("\n*** '#define LATENCY_8254 %d' (IN USE %lu)\n\n", (int)params.latency_8254 + average, params.latency_8254);
} }

View file

@ -1,4 +1,4 @@
#define CALSRQ 0xcacca #define CALSRQ 0xcacca
#define CAL_8254 1 #define CAL_8254 1
#define KLATENCY 2 #define KLATENCY 2

View file

@ -25,8 +25,8 @@ struct option options[] = {
{ "help", 0, 0, 'h' }, { "help", 0, 0, 'h' },
{ "period", 1, 0, 'p' }, { "period", 1, 0, 'p' },
{ "spantime", 1, 0, 's' }, { "spantime", 1, 0, 's' },
{ "tol", 1, 0, 't' }, { "tol", 1, 0, 't' },
{ NULL, 0, 0, 0 } { NULL, 0, 0, 0 }
}; };
void print_usage(void) void print_usage(void)
@ -85,13 +85,13 @@ int main(int argc, char *argv[])
{ {
int kern_latency, UserLatency = 0, KernLatency = 0, tol = 100; int kern_latency, UserLatency = 0, KernLatency = 0, tol = 100;
while (1) { while (1) {
int c; int c;
if ((c = getopt_long(argc, argv, "hp:t:l:", options, NULL)) < 0) { if ((c = getopt_long(argc, argv, "hp:t:l:", options, NULL)) < 0) {
break; break;
} }
switch(c) { switch(c) {
case 'h': { print_usage(); return 0; } case 'h': { print_usage(); return 0; }
case 'p': { period = atoi(optarg); break; } case 'p': { period = atoi(optarg); break; }
case 't': { loops = atoi(optarg); break; } case 't': { loops = atoi(optarg); break; }
case 'l': { tol = atoi(optarg); break; } case 'l': { tol = atoi(optarg); break; }

View file

@ -85,11 +85,11 @@ pci.ids database, ICH5-M ?)
*/ */
#define DEVFN 0xf8 /* device 31, function 0 */ #define DEVFN 0xf8 /* device 31, function 0 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define pci_get_device(a, b, c) pci_find_device(a, b, c) #define pci_get_device(a, b, c) pci_find_device(a, b, c)
#define pci_dev_put(a) do { /*nothing*/ } while(0) #define pci_dev_put(a) do { /*nothing*/ } while(0)
#endif #endif
#define PMBASE_B0 0x40 #define PMBASE_B0 0x40
@ -103,13 +103,13 @@ pci.ids database, ICH5-M ?)
#define INTEL_USB2_EN_BIT (0x01 << 18) /* ICH4, ... */ #define INTEL_USB2_EN_BIT (0x01 << 18) /* ICH4, ... */
#define LEGACY_USB2_EN_BIT (0x01 << 17) /* ICH4, ... */ #define LEGACY_USB2_EN_BIT (0x01 << 17) /* ICH4, ... */
#define PERIODIC_EN_BIT (0x01 << 14) /* called 1MIN_ in ICH0 */ #define PERIODIC_EN_BIT (0x01 << 14) /* called 1MIN_ in ICH0 */
#define TCO_EN_BIT (0x01 << 13) #define TCO_EN_BIT (0x01 << 13)
#define MCSMI_EN_BIT (0x01 << 11) #define MCSMI_EN_BIT (0x01 << 11)
#define SWSMI_TMR_EN_BIT (0x01 << 6) #define SWSMI_TMR_EN_BIT (0x01 << 6)
#define APMC_EN_BIT (0x01 << 5) #define APMC_EN_BIT (0x01 << 5)
#define SLP_EN_BIT (0x01 << 4) #define SLP_EN_BIT (0x01 << 4)
#define LEGACY_USB_EN_BIT (0x01 << 3) #define LEGACY_USB_EN_BIT (0x01 << 3)
#define BIOS_EN_BIT (0x01 << 2) #define BIOS_EN_BIT (0x01 << 2)
#define GBL_SMI_EN_BIT (0x01 << 0) /* This is reset by a PCI reset event! */ #define GBL_SMI_EN_BIT (0x01 << 0) /* This is reset by a PCI reset event! */
unsigned long hal_smi_masked_bits = 0 unsigned long hal_smi_masked_bits = 0
@ -166,9 +166,9 @@ static int rtai_smi_notify_reboot(struct notifier_block *nb, unsigned long event
} }
static struct notifier_block rtai_smi_reboot_notifier = { static struct notifier_block rtai_smi_reboot_notifier = {
.notifier_call = &rtai_smi_notify_reboot, .notifier_call = &rtai_smi_notify_reboot,
.next = NULL, .next = NULL,
.priority = 0 .priority = 0
}; };
static void hal_smi_restore(void) static void hal_smi_restore(void)
@ -208,14 +208,14 @@ static int hal_smi_init(void)
* Just register the used ports. * Just register the used ports.
*/ */
for (id = &hal_smi_pci_tbl[0]; dev == NULL && id->vendor != 0; id++) { for (id = &hal_smi_pci_tbl[0]; dev == NULL && id->vendor != 0; id++) {
dev = pci_get_device(id->vendor, id->device, NULL); dev = pci_get_device(id->vendor, id->device, NULL);
} }
if (dev == NULL || dev->bus->number || dev->devfn != DEVFN) { if (dev == NULL || dev->bus->number || dev->devfn != DEVFN) {
pci_dev_put(dev); pci_dev_put(dev);
printk("RTAI: Intel chipset not found.\n"); printk("RTAI: Intel chipset not found.\n");
return -ENODEV; return -ENODEV;
} }
printk("RTAI: Intel chipset found, enabling SMI workaround.\n"); printk("RTAI: Intel chipset found, enabling SMI workaround.\n");
hal_smi_en_addr = get_smi_en_addr(dev); hal_smi_en_addr = get_smi_en_addr(dev);
@ -266,15 +266,15 @@ MODULE_LICENSE("GPL");
SMI, depending on registers configuration : SMI, depending on registers configuration :
register DEVTRAP_EN, DEVTRAP_STS register DEVTRAP_EN, DEVTRAP_STS
BIG FAT WARNING : globally disabling SMI on a box with SATA disks and BIG FAT WARNING : globally disabling SMI on a box with SATA disks and
SATA controller in "legacy" mode, probably prevents disks from SATA controller in "legacy" mode, probably prevents disks from
working. working.
pages 382, 383, 400; Monitors ? pages 382, 383, 400; Monitors ?
seem to be a generic legacy device emulation (like previous), registers seem to be a generic legacy device emulation (like previous), registers
MON[4-7]_FWD_EN, enables forwarding of I/O to LPC MON[4-7]_FWD_EN, enables forwarding of I/O to LPC
MON[4-7]_TRP_RNG, address of the emulated devices MON[4-7]_TRP_RNG, address of the emulated devices
MON[4-7]_TRP_MSK and MON_SMI (registers MON[4-7]_TRAP_EN and MON[4-7]_TRP_MSK and MON_SMI (registers MON[4-7]_TRAP_EN and
MON[4-7]_TRAP_STS) MON[4-7]_TRAP_STS)
page 407: TCO page 407: TCO
register TCO1_CNT, bit NMI2SMI_EN, enables TCO to use SMI instead of NMI, register TCO1_CNT, bit NMI2SMI_EN, enables TCO to use SMI instead of NMI,

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,5 +1,6 @@
# #
# Automatically generated make config: don't edit # Automatically generated file; DO NOT EDIT.
# RTAI/x86 configuration
# #
CONFIG_MODULES=y CONFIG_MODULES=y
CONFIG_RTAI_VERSION="4.0 (vulcano)" CONFIG_RTAI_VERSION="4.0 (vulcano)"
@ -11,22 +12,9 @@ CONFIG_RTAI_INSTALLDIR="/usr/realtime"
CONFIG_RTAI_LINUXDIR="/usr/src/linux" CONFIG_RTAI_LINUXDIR="/usr/src/linux"
# #
# RTAI Documentation # Machine (x86 / x86_64)
# #
# CONFIG_RTAI_DOX_DOC is not set CONFIG_RTAI_CPUS="4"
# CONFIG_RTAI_DOC_LATEX_NONSTOP is not set
# CONFIG_RTAI_DBX_DOC is not set
CONFIG_RTAI_TESTSUITE=y
CONFIG_RTAI_COMPAT=y
# CONFIG_RTAI_EXTENDED is not set
CONFIG_RTAI_LXRT_NO_INLINE=y
# CONFIG_RTAI_LXRT_STATIC_INLINE is not set
#
# Machine (x86)
#
CONFIG_RTAI_FPU_SUPPORT=y
CONFIG_RTAI_CPUS="2"
# CONFIG_RTAI_DIAG_TSC_SYNC is not set # CONFIG_RTAI_DIAG_TSC_SYNC is not set
# #
@ -41,27 +29,26 @@ CONFIG_RTAI_CPUS="2"
CONFIG_RTAI_SCHED_8254_LATENCY="4700" CONFIG_RTAI_SCHED_8254_LATENCY="4700"
CONFIG_RTAI_SCHED_APIC_LATENCY="3944" CONFIG_RTAI_SCHED_APIC_LATENCY="3944"
# CONFIG_RTAI_BUSY_TIME_ALIGN is not set # CONFIG_RTAI_BUSY_TIME_ALIGN is not set
CONFIG_RTAI_SCHED_LXRT_NUMSLOTS="150" CONFIG_RTAI_SCHED_LXRT_NUMSLOTS="1024"
CONFIG_RTAI_MONITOR_EXECTIME=y CONFIG_RTAI_MONITOR_EXECTIME=y
CONFIG_RTAI_ALLOW_RR=y CONFIG_RTAI_ALLOW_RR=y
# CONFIG_RTAI_FULL_PRINHER is not set # CONFIG_RTAI_FULL_PRINHER is not set
# CONFIG_RTAI_ALIGN_LINUX_PRIORITY is not set CONFIG_RTAI_ALIGN_LINUX_PRIORITY=y
# CONFIG_RTAI_ONE_SHOT is not set CONFIG_RTAI_ONE_SHOT=y
CONFIG_RTAI_CAL_FREQS_FACT="0" CONFIG_RTAI_CAL_FREQS_FACT="0"
# #
# Supported services # Supported services
# #
CONFIG_RTAI_BITS=m
CONFIG_RTAI_FIFOS=m CONFIG_RTAI_FIFOS=m
CONFIG_RTAI_NETRPC=m CONFIG_RTAI_NETRPC=m
# CONFIG_RTAI_NETRPC_RTNET is not set # CONFIG_RTAI_NETRPC_RTNET is not set
CONFIG_RTAI_SHM=m CONFIG_RTAI_SHM=m
CONFIG_RTAI_SEM=m CONFIG_RTAI_SEM=m
# CONFIG_RTAI_RT_POLL is not set CONFIG_RTAI_RT_POLL=y
# CONFIG_RTAI_RT_POLL_ON_STACK is not set
CONFIG_RTAI_MSG=m CONFIG_RTAI_MSG=m
CONFIG_RTAI_MBX=m CONFIG_RTAI_MBX=m
CONFIG_RTAI_TBX=m
CONFIG_RTAI_TASKLETS=m CONFIG_RTAI_TASKLETS=m
CONFIG_RTAI_MQ=m CONFIG_RTAI_MQ=m
CONFIG_RTAI_CLOCK_REALTIME=y CONFIG_RTAI_CLOCK_REALTIME=y
@ -69,33 +56,11 @@ CONFIG_RTAI_CLOCK_REALTIME=y
# #
# Other features # Other features
# #
# CONFIG_RTAI_USE_NEWERR is not set CONFIG_RTAI_USE_NEWERR=y
# CONFIG_RTAI_MATH is not set
CONFIG_RTAI_MALLOC=y CONFIG_RTAI_MALLOC=y
# CONFIG_RTAI_USE_TLSF is not set # CONFIG_RTAI_USE_TLSF is not set
CONFIG_RTAI_MALLOC_VMALLOC=y CONFIG_RTAI_MALLOC_VMALLOC=y
CONFIG_RTAI_MALLOC_HEAPSZ="2048" CONFIG_RTAI_MALLOC_HEAPSZ="2048"
CONFIG_RTAI_KSTACK_HEAPSZ="512" CONFIG_RTAI_KSTACK_HEAPSZ="512"
# CONFIG_RTAI_TASK_SWITCH_SIGNAL is not set # CONFIG_RTAI_TASK_SWITCH_SIGNAL is not set
# CONFIG_RTAI_USI is not set
CONFIG_RTAI_WD=y
# CONFIG_RTAI_HARD_SOFT_TOGGLER is not set # CONFIG_RTAI_HARD_SOFT_TOGGLER is not set
#
# Add-ons
#
CONFIG_RTAI_COMEDI_LXRT=y
CONFIG_RTAI_COMEDI_DIR="/usr/comedi"
# CONFIG_RTAI_USE_COMEDI_LOCK is not set
# CONFIG_RTAI_CPLUSPLUS is not set
# CONFIG_RTAI_RTDM is not set
#
# Drivers
#
CONFIG_RTAI_DRIVERS_SERIAL=y
#
# RTAI Lab
#
# CONFIG_RTAI_LAB is not set

View file

@@ -226,23 +226,23 @@ void cleanup_tsc_sync(void)
 #define CAL_LOOPS 200
 int rtai_calibrate_hard_timer(void)
 {
         unsigned long flags;
         RTIME t;
         int i, dt;
         flags = rtai_critical_enter(NULL);
 #ifdef CONFIG_X86_LOCAL_APIC
         t = rtai_rdtsc();
         for (i = 0; i < CAL_LOOPS; i++) {
                 apic_write_around(APIC_TMICT, 8000);
         }
 #else
         outb(0x34, 0x43);
         t = rtai_rdtsc();
         for (i = 0; i < CAL_LOOPS; i++) {
                 outb(LATCH & 0xff,0x40);
                 outb(LATCH >> 8,0x40);
         }
 #endif
         dt = (int)(rtai_rdtsc() - t);
         rtai_critical_exit(flags);
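The hunk above brackets CAL_LOOPS timer-programming writes (APIC or 8254) between two TSC reads, so dt ends up holding the whole loop's cost in TSC ticks. A minimal sketch of turning that delta into a per-write figure; the helper name and the cpu_hz argument are illustrative assumptions, not RTAI API:

/* Sketch only, not part of RTAI. 'dt' is the TSC delta measured above,
 * 'loops' is CAL_LOOPS, 'cpu_hz' is the TSC frequency in Hz. */
static inline int calibration_cost_ns(int dt, int loops, unsigned long cpu_hz)
{
        unsigned long long per_write = (unsigned long long)dt / loops;

        /* scale TSC ticks to nanoseconds: ticks * 1e9 / cpu_hz */
        return (int)((per_write * 1000000000ULL) / cpu_hz);
}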


@ -591,7 +591,7 @@ void rt_end_irq (unsigned irq)
void rt_eoi_irq (unsigned irq) void rt_eoi_irq (unsigned irq)
{ {
rtai_irq_desc_chip(irq)->rtai_irq_endis_fun(eoi, irq); rtai_irq_desc_chip(irq)->rtai_irq_endis_fun(eoi, irq);
} }
/** /**
@@ -938,8 +938,8 @@ void rt_request_apic_timers (void (*handler)(void), struct apic_timer_setup_data
                 }
         }
-        rtai_critical_exit(flags);
         rtai_request_tickdev(handler);
+        rtai_critical_exit(flags);
 }
 /**
@@ -956,8 +956,8 @@ void rt_free_apic_timers(void)
         rtai_release_tickdev();
         rtai_sync_level = 3;
         rtai_setup_periodic_apic(RTAI_APIC_ICOUNT,LOCAL_TIMER_VECTOR);
-        rtai_critical_exit(flags);
         rt_release_irq(RTAI_APIC_TIMER_IPI);
+        rtai_critical_exit(flags);
 }
 /**
@ -1034,9 +1034,9 @@ unsigned long rt_reset_irq_to_sym_mode (int irq)
} }
cpumask_copy((void *)&oldmask, irq_to_desc(irq)->irq_data.affinity); cpumask_copy((void *)&oldmask, irq_to_desc(irq)->irq_data.affinity);
if (rtai_old_irq_affinity[irq]) { if (rtai_old_irq_affinity[irq]) {
hal_set_irq_affinity(irq, CPUMASK_T(rtai_old_irq_affinity[irq])); hal_set_irq_affinity(irq, CPUMASK_T(rtai_old_irq_affinity[irq]));
rtai_old_irq_affinity[irq] = 0; rtai_old_irq_affinity[irq] = 0;
} }
spin_unlock(&rtai_iset_lock); spin_unlock(&rtai_iset_lock);
rtai_restore_flags(flags); rtai_restore_flags(flags);
@ -1235,15 +1235,15 @@ EXPORT_SYMBOL(rtai_isr_sched);
if (rt_scheduling[cpuid].locked && !(--rt_scheduling[cpuid].locked)) { \ if (rt_scheduling[cpuid].locked && !(--rt_scheduling[cpuid].locked)) { \
if (rt_scheduling[cpuid].rqsted > 0 && rtai_isr_sched) { \ if (rt_scheduling[cpuid].rqsted > 0 && rtai_isr_sched) { \
rtai_isr_sched(cpuid); \ rtai_isr_sched(cpuid); \
} \ } \
} \ } \
} while (0) } while (0)
#else /* !CONFIG_RTAI_SCHED_ISR_LOCK */ #else /* !CONFIG_RTAI_SCHED_ISR_LOCK */
#define RTAI_SCHED_ISR_LOCK() \ #define RTAI_SCHED_ISR_LOCK() \
do { } while (0) do { } while (0)
// do { cpuid = rtai_cpuid(); } while (0) // do { cpuid = rtai_cpuid(); } while (0)
#define RTAI_SCHED_ISR_UNLOCK() \ #define RTAI_SCHED_ISR_UNLOCK() \
do { } while (0) do { } while (0)
#endif /* CONFIG_RTAI_SCHED_ISR_LOCK */ #endif /* CONFIG_RTAI_SCHED_ISR_LOCK */
static int rtai_hirq_dispatcher (int irq) static int rtai_hirq_dispatcher (int irq)
@ -1274,38 +1274,38 @@ static int rtai_trap_fault (unsigned event, void *evdata)
{ {
#ifdef HINT_DIAG_TRAPS #ifdef HINT_DIAG_TRAPS
static unsigned long traps_in_hard_intr = 0; static unsigned long traps_in_hard_intr = 0;
do { do {
unsigned long flags; unsigned long flags;
rtai_save_flags_and_cli(flags); rtai_save_flags_and_cli(flags);
if (!test_bit(RTAI_IFLAG, &flags)) { if (!test_bit(RTAI_IFLAG, &flags)) {
if (!test_and_set_bit(event, &traps_in_hard_intr)) { if (!test_and_set_bit(event, &traps_in_hard_intr)) {
HINT_DIAG_MSG(rt_printk("TRAP %d HAS INTERRUPT DISABLED (TRAPS PICTURE %lx).\n", event, traps_in_hard_intr);); HINT_DIAG_MSG(rt_printk("TRAP %d HAS INTERRUPT DISABLED (TRAPS PICTURE %lx).\n", event, traps_in_hard_intr););
} }
} }
} while (0); } while (0);
#endif #endif
static const int trap2sig[] = { static const int trap2sig[] = {
SIGFPE, // 0 - Divide error SIGFPE, // 0 - Divide error
SIGTRAP, // 1 - Debug SIGTRAP, // 1 - Debug
SIGSEGV, // 2 - NMI (but we ignore these) SIGSEGV, // 2 - NMI (but we ignore these)
SIGTRAP, // 3 - Software breakpoint SIGTRAP, // 3 - Software breakpoint
SIGSEGV, // 4 - Overflow SIGSEGV, // 4 - Overflow
SIGSEGV, // 5 - Bounds SIGSEGV, // 5 - Bounds
SIGILL, // 6 - Invalid opcode SIGILL, // 6 - Invalid opcode
SIGSEGV, // 7 - Device not available SIGSEGV, // 7 - Device not available
SIGSEGV, // 8 - Double fault SIGSEGV, // 8 - Double fault
SIGFPE, // 9 - Coprocessor segment overrun SIGFPE, // 9 - Coprocessor segment overrun
SIGSEGV, // 10 - Invalid TSS SIGSEGV, // 10 - Invalid TSS
SIGBUS, // 11 - Segment not present SIGBUS, // 11 - Segment not present
SIGBUS, // 12 - Stack segment SIGBUS, // 12 - Stack segment
SIGSEGV, // 13 - General protection fault SIGSEGV, // 13 - General protection fault
SIGSEGV, // 14 - Page fault SIGSEGV, // 14 - Page fault
0, // 15 - Spurious interrupt 0, // 15 - Spurious interrupt
SIGFPE, // 16 - Coprocessor error SIGFPE, // 16 - Coprocessor error
SIGBUS, // 17 - Alignment check SIGBUS, // 17 - Alignment check
SIGSEGV, // 18 - Reserved SIGSEGV, // 18 - Reserved
SIGFPE, // 19 - XMM fault SIGFPE, // 19 - XMM fault
0,0,0,0,0,0,0,0,0,0,0,0 0,0,0,0,0,0,0,0,0,0,0,0
}; };
@@ -1460,7 +1460,7 @@ static unsigned long hal_request_apic_freq(void);
 static void rtai_install_archdep (void)
 {
-        ipipe_select_timers(cpu_active_mask);
+        ipipe_select_timers(cpu_present_mask);
         hal_catch_event(hal_root_domain, HAL_SYSCALL_PROLOGUE, (void *)intercept_syscall_prologue);
         if (rtai_cpufreq_arg == 0) {
@ -1592,7 +1592,7 @@ static int PROC_READ_FUN(rtai_read_proc)
} }
PROC_PRINT("\n #%d at %p", i, rtai_domain.irqs[i].handler); PROC_PRINT("\n #%d at %p", i, rtai_domain.irqs[i].handler);
} }
} }
if (none) { if (none) {
PROC_PRINT("none"); PROC_PRINT("none");
} }
@ -1608,7 +1608,7 @@ static int PROC_READ_FUN(rtai_read_proc)
PROC_PRINT("#%d ", i); PROC_PRINT("#%d ", i);
none = 0; none = 0;
} }
} }
if (none) { if (none) {
PROC_PRINT("none"); PROC_PRINT("none");
} }
@ -1619,7 +1619,7 @@ static int PROC_READ_FUN(rtai_read_proc)
PROC_PRINT("** RTAI TSC OFFSETs (TSC units, 0 ref. CPU): "); PROC_PRINT("** RTAI TSC OFFSETs (TSC units, 0 ref. CPU): ");
for (i = 0; i < num_online_cpus(); i++) { for (i = 0; i < num_online_cpus(); i++) {
PROC_PRINT("CPU#%d: %ld; ", i, rtai_tsc_ofst[i]); PROC_PRINT("CPU#%d: %ld; ", i, rtai_tsc_ofst[i]);
} }
PROC_PRINT("\n\n"); PROC_PRINT("\n\n");
#endif #endif
PROC_PRINT("** MASK OF CPUs ISOLATED FOR RTAI: 0x%lx.", IsolCpusMask); PROC_PRINT("** MASK OF CPUs ISOLATED FOR RTAI: 0x%lx.", IsolCpusMask);
@ -1639,7 +1639,7 @@ static int rtai_proc_register (void)
if (!rtai_proc_root) { if (!rtai_proc_root) {
printk(KERN_ERR "Unable to initialize /proc/rtai.\n"); printk(KERN_ERR "Unable to initialize /proc/rtai.\n");
return -1; return -1;
} }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
rtai_proc_root->owner = THIS_MODULE; rtai_proc_root->owner = THIS_MODULE;
#endif #endif
@ -1648,7 +1648,7 @@ static int rtai_proc_register (void)
if (!ent) { if (!ent) {
printk(KERN_ERR "Unable to initialize /proc/rtai/hal.\n"); printk(KERN_ERR "Unable to initialize /proc/rtai/hal.\n");
return -1; return -1;
} }
SET_PROC_READ_ENTRY(ent, rtai_read_proc); SET_PROC_READ_ENTRY(ent, rtai_read_proc);
return 0; return 0;
@ -1845,9 +1845,9 @@ asmlinkage int rt_sync_printk(const char *fmt, ...)
char buf[VSNPRINTF_BUF]; char buf[VSNPRINTF_BUF];
va_list args; va_list args;
va_start(args, fmt); va_start(args, fmt);
vsnprintf(buf, VSNPRINTF_BUF, fmt, args); vsnprintf(buf, VSNPRINTF_BUF, fmt, args);
va_end(args); va_end(args);
hal_set_printk_sync(&rtai_domain); hal_set_printk_sync(&rtai_domain);
return printk("%s", buf); return printk("%s", buf);
} }


@@ -96,7 +96,7 @@ static void rtai_release_tickdev(void);
 static inline void rtai_setup_periodic_apic (unsigned count, unsigned vector)
 {
         apic_read(APIC_LVTT);
-        apic_write(APIC_LVTT, APIC_LVT_TIMER_PERIODIC | vector);
+        apic_write(APIC_LVTT, APIC_INTEGRATED(GET_APIC_VERSION(apic_read(APIC_LVR))) ? SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV) | APIC_LVT_TIMER_PERIODIC | vector : APIC_LVT_TIMER_PERIODIC | vector);
         apic_read(APIC_TMICT);
         apic_write(APIC_TMICT, count);
 }
@@ -104,7 +104,7 @@ static inline void rtai_setup_periodic_apic (unsigned count, unsigned vector)
 static inline void rtai_setup_oneshot_apic (unsigned count, unsigned vector)
 {
         apic_read(APIC_LVTT);
-        apic_write(APIC_LVTT, vector);
+        apic_write(APIC_LVTT, APIC_INTEGRATED(GET_APIC_VERSION(apic_read(APIC_LVR))) ? SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV) | vector : vector);
         apic_read(APIC_TMICT);
         apic_write(APIC_TMICT, count);
 }
@@ -201,35 +201,35 @@ RTAI_MODULE_PARM(IsolCpusMask, ulong);
 int rt_request_irq (unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode)
 {
         int ret;
         ret = ipipe_virtualize_irq(&rtai_domain, irq, (void *)handler, cookie, NULL, IPIPE_HANDLE_MASK | IPIPE_WIRED_MASK);
         if (!ret) {
                 rtai_realtime_irq[irq].retmode = retmode ? 1 : 0;
                 if (IsolCpusMask && irq < IPIPE_NR_XIRQS) {
                         rtai_realtime_irq[irq].cpumask = rt_assign_irq_to_cpu(irq, IsolCpusMask);
                 }
         }
         return ret;
 }
 int rt_release_irq (unsigned irq)
 {
         int ret;
         ret = ipipe_virtualize_irq(&rtai_domain, irq, NULL, NULL, NULL, 0);
         if (!ret && IsolCpusMask && irq < IPIPE_NR_XIRQS) {
                 rt_assign_irq_to_cpu(irq, rtai_realtime_irq[irq].cpumask);
         }
         return 0;
 }
 int rt_set_irq_ack(unsigned irq, int (*irq_ack)(unsigned int, void *))
 {
         if (irq >= RTAI_NR_IRQS) {
                 return -EINVAL;
         }
         // rtai_realtime_irq[irq].irq_ack = irq_ack ? irq_ack : (void *)hal_root_domain->irqs[irq].acknowledge;
         rtai_domain.irqs[irq].ackfn = irq_ack ? (void *)irq_ack : hal_root_domain->irqs[irq].ackfn;
         return 0;
 }
 void rt_set_irq_cookie (unsigned irq, void *cookie)
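For reference, a hedged usage sketch of the rt_request_irq()/rt_release_irq() pair shown in the hunk above; the IRQ number, cookie and handler body are invented, and retmode 0 keeps the default return behaviour:

/* Illustrative only, not from the tree. */
static int demo_handler(unsigned irq, void *cookie)
{
        /* service the device here; returning 0 lets the dispatcher decide
         * whether to flush the pipeline afterwards */
        return 0;
}

static int demo_attach(void)
{
        int ret = rt_request_irq(7, demo_handler, NULL, 0);

        if (!ret) {
                /* ... real-time work driven by IRQ 7 ... */
                rt_release_irq(7);
        }
        return ret;
}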
@ -660,10 +660,10 @@ void rt_pend_linux_irq (unsigned irq)
RTAI_SYSCALL_MODE void usr_rt_pend_linux_irq (unsigned irq) RTAI_SYSCALL_MODE void usr_rt_pend_linux_irq (unsigned irq)
{ {
unsigned long flags; unsigned long flags;
rtai_save_flags_and_cli(flags); rtai_save_flags_and_cli(flags);
hal_pend_uncond(irq, rtai_cpuid()); hal_pend_uncond(irq, rtai_cpuid());
rtai_restore_flags(flags); rtai_restore_flags(flags);
} }
/** /**
@@ -924,8 +924,8 @@ void rt_request_apic_timers (void (*handler)(void), struct apic_timer_setup_data
                 }
         }
-        rtai_critical_exit(flags);
         rtai_request_tickdev(handler);
+        rtai_critical_exit(flags);
 }
 /**
@@ -942,8 +942,8 @@ void rt_free_apic_timers(void)
         rtai_release_tickdev();
         rtai_sync_level = 3;
         rtai_setup_periodic_apic(RTAI_APIC_ICOUNT,LOCAL_TIMER_VECTOR);
-        rtai_critical_exit(flags);
         rt_release_irq(RTAI_APIC_TIMER_IPI);
+        rtai_critical_exit(flags);
 }
 /**
@ -967,22 +967,22 @@ void rt_free_apic_timers(void)
unsigned long rt_assign_irq_to_cpu (int irq, unsigned long cpumask) unsigned long rt_assign_irq_to_cpu (int irq, unsigned long cpumask)
{ {
if (irq >= IPIPE_NR_XIRQS || &rtai_irq_desc(irq) == NULL || rtai_irq_desc_chip(irq) == NULL || rtai_irq_desc_chip(irq)->irq_set_affinity == NULL) { if (irq >= IPIPE_NR_XIRQS || &rtai_irq_desc(irq) == NULL || rtai_irq_desc_chip(irq) == NULL || rtai_irq_desc_chip(irq)->irq_set_affinity == NULL) {
return 0; return 0;
} else { } else {
unsigned long oldmask, flags; unsigned long oldmask, flags;
rtai_save_flags_and_cli(flags); rtai_save_flags_and_cli(flags);
spin_lock(&rtai_iset_lock); spin_lock(&rtai_iset_lock);
cpumask_copy((void *)&oldmask, irq_to_desc(irq)->irq_data.affinity); cpumask_copy((void *)&oldmask, irq_to_desc(irq)->irq_data.affinity);
hal_set_irq_affinity(irq, CPUMASK_T(cpumask)); hal_set_irq_affinity(irq, CPUMASK_T(cpumask));
if (oldmask) { if (oldmask) {
rtai_old_irq_affinity[irq] = oldmask; rtai_old_irq_affinity[irq] = oldmask;
} }
spin_unlock(&rtai_iset_lock); spin_unlock(&rtai_iset_lock);
rtai_restore_flags(flags); rtai_restore_flags(flags);
return oldmask; return oldmask;
} }
} }
/** /**
@ -1006,28 +1006,28 @@ unsigned long rt_assign_irq_to_cpu (int irq, unsigned long cpumask)
*/ */
unsigned long rt_reset_irq_to_sym_mode (int irq) unsigned long rt_reset_irq_to_sym_mode (int irq)
{ {
unsigned long oldmask, flags; unsigned long oldmask, flags;
if (irq >= IPIPE_NR_XIRQS) { if (irq >= IPIPE_NR_XIRQS) {
return 0; return 0;
} else { } else {
rtai_save_flags_and_cli(flags); rtai_save_flags_and_cli(flags);
spin_lock(&rtai_iset_lock); spin_lock(&rtai_iset_lock);
if (rtai_old_irq_affinity[irq] == 0) { if (rtai_old_irq_affinity[irq] == 0) {
spin_unlock(&rtai_iset_lock); spin_unlock(&rtai_iset_lock);
rtai_restore_flags(flags); rtai_restore_flags(flags);
return -EINVAL; return -EINVAL;
} }
cpumask_copy((void *)&oldmask, irq_to_desc(irq)->irq_data.affinity); cpumask_copy((void *)&oldmask, irq_to_desc(irq)->irq_data.affinity);
if (rtai_old_irq_affinity[irq]) { if (rtai_old_irq_affinity[irq]) {
hal_set_irq_affinity(irq, CPUMASK_T(rtai_old_irq_affinity[irq])); hal_set_irq_affinity(irq, CPUMASK_T(rtai_old_irq_affinity[irq]));
rtai_old_irq_affinity[irq] = 0; rtai_old_irq_affinity[irq] = 0;
} }
spin_unlock(&rtai_iset_lock); spin_unlock(&rtai_iset_lock);
rtai_restore_flags(flags); rtai_restore_flags(flags);
return oldmask; return oldmask;
} }
} }
#else /* !CONFIG_SMP */ #else /* !CONFIG_SMP */
@ -1211,41 +1211,41 @@ do { rtai_cli(); rt_restore_switch_to_linux(sflags, cpuid); } while (0)
void (*rtai_isr_sched)(int cpuid); void (*rtai_isr_sched)(int cpuid);
EXPORT_SYMBOL(rtai_isr_sched); EXPORT_SYMBOL(rtai_isr_sched);
#define RTAI_SCHED_ISR_LOCK() \ #define RTAI_SCHED_ISR_LOCK() \
do { \ do { \
if (!rt_scheduling[cpuid].locked++) { \ if (!rt_scheduling[cpuid].locked++) { \
rt_scheduling[cpuid].rqsted = 0; \ rt_scheduling[cpuid].rqsted = 0; \
} \ } \
} while (0) } while (0)
#define RTAI_SCHED_ISR_UNLOCK() \ #define RTAI_SCHED_ISR_UNLOCK() \
do { \ do { \
if (rt_scheduling[cpuid].locked && !(--rt_scheduling[cpuid].locked)) { \ if (rt_scheduling[cpuid].locked && !(--rt_scheduling[cpuid].locked)) { \
if (rt_scheduling[cpuid].rqsted > 0 && rtai_isr_sched) { \ if (rt_scheduling[cpuid].rqsted > 0 && rtai_isr_sched) { \
rtai_isr_sched(cpuid); \ rtai_isr_sched(cpuid); \
} \ } \
} \ } \
} while (0) } while (0)
#else /* !CONFIG_RTAI_SCHED_ISR_LOCK */ #else /* !CONFIG_RTAI_SCHED_ISR_LOCK */
#define RTAI_SCHED_ISR_LOCK() \ #define RTAI_SCHED_ISR_LOCK() \
do { } while (0) do { } while (0)
// do { cpuid = rtai_cpuid(); } while (0) // do { cpuid = rtai_cpuid(); } while (0)
#define RTAI_SCHED_ISR_UNLOCK() \ #define RTAI_SCHED_ISR_UNLOCK() \
do { } while (0) do { } while (0)
#endif /* CONFIG_RTAI_SCHED_ISR_LOCK */ #endif /* CONFIG_RTAI_SCHED_ISR_LOCK */
static int rtai_hirq_dispatcher (int irq) static int rtai_hirq_dispatcher (int irq)
{ {
unsigned long cpuid; unsigned long cpuid;
if (rtai_domain.irqs[irq].handler) { if (rtai_domain.irqs[irq].handler) {
unsigned long sflags; unsigned long sflags;
HAL_LOCK_LINUX(); HAL_LOCK_LINUX();
RTAI_SCHED_ISR_LOCK(); RTAI_SCHED_ISR_LOCK();
rtai_domain.irqs[irq].handler(irq, rtai_domain.irqs[irq].cookie); rtai_domain.irqs[irq].handler(irq, rtai_domain.irqs[irq].cookie);
RTAI_SCHED_ISR_UNLOCK(); RTAI_SCHED_ISR_UNLOCK();
HAL_UNLOCK_LINUX(); HAL_UNLOCK_LINUX();
if (rtai_realtime_irq[irq].retmode || test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) { if (rtai_realtime_irq[irq].retmode || test_bit(IPIPE_STALL_FLAG, ROOT_STATUS_ADR(cpuid))) {
return 0; return 0;
} }
} }
rtai_sti(); rtai_sti();
hal_fast_flush_pipeline(cpuid); hal_fast_flush_pipeline(cpuid);
return 0; return 0;
@ -1269,38 +1269,38 @@ static int rtai_trap_fault (unsigned event, void *evdata)
{ {
#ifdef HINT_DIAG_TRAPS #ifdef HINT_DIAG_TRAPS
static unsigned long traps_in_hard_intr = 0; static unsigned long traps_in_hard_intr = 0;
do { do {
unsigned long flags; unsigned long flags;
rtai_save_flags_and_cli(flags); rtai_save_flags_and_cli(flags);
if (!test_bit(RTAI_IFLAG, &flags)) { if (!test_bit(RTAI_IFLAG, &flags)) {
if (!test_and_set_bit(event, &traps_in_hard_intr)) { if (!test_and_set_bit(event, &traps_in_hard_intr)) {
HINT_DIAG_MSG(rt_printk("TRAP %d HAS INTERRUPT DISABLED (TRAPS PICTURE %lx).\n", event, traps_in_hard_intr);); HINT_DIAG_MSG(rt_printk("TRAP %d HAS INTERRUPT DISABLED (TRAPS PICTURE %lx).\n", event, traps_in_hard_intr););
} }
} }
} while (0); } while (0);
#endif #endif
static const int trap2sig[] = { static const int trap2sig[] = {
SIGFPE, // 0 - Divide error SIGFPE, // 0 - Divide error
SIGTRAP, // 1 - Debug SIGTRAP, // 1 - Debug
SIGSEGV, // 2 - NMI (but we ignore these) SIGSEGV, // 2 - NMI (but we ignore these)
SIGTRAP, // 3 - Software breakpoint SIGTRAP, // 3 - Software breakpoint
SIGSEGV, // 4 - Overflow SIGSEGV, // 4 - Overflow
SIGSEGV, // 5 - Bounds SIGSEGV, // 5 - Bounds
SIGILL, // 6 - Invalid opcode SIGILL, // 6 - Invalid opcode
SIGSEGV, // 7 - Device not available SIGSEGV, // 7 - Device not available
SIGSEGV, // 8 - Double fault SIGSEGV, // 8 - Double fault
SIGFPE, // 9 - Coprocessor segment overrun SIGFPE, // 9 - Coprocessor segment overrun
SIGSEGV, // 10 - Invalid TSS SIGSEGV, // 10 - Invalid TSS
SIGBUS, // 11 - Segment not present SIGBUS, // 11 - Segment not present
SIGBUS, // 12 - Stack segment SIGBUS, // 12 - Stack segment
SIGSEGV, // 13 - General protection fault SIGSEGV, // 13 - General protection fault
SIGSEGV, // 14 - Page fault SIGSEGV, // 14 - Page fault
0, // 15 - Spurious interrupt 0, // 15 - Spurious interrupt
SIGFPE, // 16 - Coprocessor error SIGFPE, // 16 - Coprocessor error
SIGBUS, // 17 - Alignment check SIGBUS, // 17 - Alignment check
SIGSEGV, // 18 - Reserved SIGSEGV, // 18 - Reserved
SIGFPE, // 19 - XMM fault SIGFPE, // 19 - XMM fault
0,0,0,0,0,0,0,0,0,0,0,0 0,0,0,0,0,0,0,0,0,0,0,0
}; };
@ -1373,13 +1373,13 @@ EXPORT_SYMBOL(rtai_usrq_dispatcher);
static int intercept_syscall_prologue(unsigned long event, struct pt_regs *regs) static int intercept_syscall_prologue(unsigned long event, struct pt_regs *regs)
{ {
if (likely(regs->LINUX_SYSCALL_NR >= RTAI_SYSCALL_NR)) { if (likely(regs->LINUX_SYSCALL_NR >= RTAI_SYSCALL_NR)) {
unsigned long srq = regs->LINUX_SYSCALL_REG1; unsigned long srq = regs->LINUX_SYSCALL_REG1;
IF_IS_A_USI_SRQ_CALL_IT(srq, regs->LINUX_SYSCALL_REG2, (long long *)regs->LINUX_SYSCALL_REG3, regs->LINUX_SYSCALL_FLAGS, 1); IF_IS_A_USI_SRQ_CALL_IT(srq, regs->LINUX_SYSCALL_REG2, (long long *)regs->LINUX_SYSCALL_REG3, regs->LINUX_SYSCALL_FLAGS, 1);
*((long long *)regs->LINUX_SYSCALL_REG3) = rtai_usrq_dispatcher(srq, regs->LINUX_SYSCALL_REG2); *((long long *)regs->LINUX_SYSCALL_REG3) = rtai_usrq_dispatcher(srq, regs->LINUX_SYSCALL_REG2);
hal_test_and_fast_flush_pipeline(rtai_cpuid()); hal_test_and_fast_flush_pipeline(rtai_cpuid());
return 1; return 1;
} }
return 0; return 0;
} }
@@ -1394,8 +1394,8 @@ static unsigned long hal_request_apic_freq(void);
 static void rtai_install_archdep (void)
 {
-        ipipe_select_timers(cpu_active_mask);
+        ipipe_select_timers(cpu_present_mask);
         hal_catch_event(hal_root_domain, HAL_SYSCALL_PROLOGUE, (void *)intercept_syscall_prologue);
         if (rtai_cpufreq_arg == 0) {
                 struct hal_sysinfo_struct sysinfo;
@ -1453,10 +1453,10 @@ extern void *sys_call_table[];
void rtai_set_linux_task_priority (struct task_struct *task, int policy, int prio) void rtai_set_linux_task_priority (struct task_struct *task, int policy, int prio)
{ {
hal_set_linux_task_priority(task, policy, prio); hal_set_linux_task_priority(task, policy, prio);
if (task->rt_priority != prio || task->policy != policy) { if (task->rt_priority != prio || task->policy != policy) {
printk("RTAI[hal]: sched_setscheduler(policy = %d, prio = %d) failed, (%s -- pid = %d)\n", policy, prio, task->comm, task->pid); printk("RTAI[hal]: sched_setscheduler(policy = %d, prio = %d) failed, (%s -- pid = %d)\n", policy, prio, task->comm, task->pid);
} }
} }
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
@ -1490,7 +1490,7 @@ static int PROC_READ_FUN(rtai_read_proc)
} }
PROC_PRINT("\n #%d at %p", i, rtai_domain.irqs[i].handler); PROC_PRINT("\n #%d at %p", i, rtai_domain.irqs[i].handler);
} }
} }
if (none) { if (none) {
PROC_PRINT("none"); PROC_PRINT("none");
} }
@ -1506,7 +1506,7 @@ static int PROC_READ_FUN(rtai_read_proc)
PROC_PRINT("#%d ", i); PROC_PRINT("#%d ", i);
none = 0; none = 0;
} }
} }
if (none) { if (none) {
PROC_PRINT("none"); PROC_PRINT("none");
} }
@ -1518,7 +1518,7 @@ static int PROC_READ_FUN(rtai_read_proc)
for (i = 0; i < num_online_cpus(); i++) { for (i = 0; i < num_online_cpus(); i++) {
PROC_PRINT("CPU#%d: %ld; ", i, rtai_tsc_ofst[i]); PROC_PRINT("CPU#%d: %ld; ", i, rtai_tsc_ofst[i]);
} }
PROC_PRINT("\n\n"); PROC_PRINT("\n\n");
#endif #endif
PROC_PRINT("** MASK OF CPUs ISOLATED FOR RTAI: 0x%lx.", IsolCpusMask); PROC_PRINT("** MASK OF CPUs ISOLATED FOR RTAI: 0x%lx.", IsolCpusMask);
PROC_PRINT("\n\n"); PROC_PRINT("\n\n");
@ -1537,7 +1537,7 @@ static int rtai_proc_register (void)
if (!rtai_proc_root) { if (!rtai_proc_root) {
printk(KERN_ERR "Unable to initialize /proc/rtai.\n"); printk(KERN_ERR "Unable to initialize /proc/rtai.\n");
return -1; return -1;
} }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
rtai_proc_root->owner = THIS_MODULE; rtai_proc_root->owner = THIS_MODULE;
#endif #endif
@ -1546,7 +1546,7 @@ static int rtai_proc_register (void)
if (!ent) { if (!ent) {
printk(KERN_ERR "Unable to initialize /proc/rtai/hal.\n"); printk(KERN_ERR "Unable to initialize /proc/rtai/hal.\n");
return -1; return -1;
} }
SET_PROC_READ_ENTRY(ent, rtai_read_proc); SET_PROC_READ_ENTRY(ent, rtai_read_proc);
return 0; return 0;
@ -1580,11 +1580,11 @@ extern void *hal_irq_handler;
#undef ack_bad_irq #undef ack_bad_irq
void ack_bad_irq(unsigned int irq) void ack_bad_irq(unsigned int irq)
{ {
printk("unexpected IRQ trap at vector %02x\n", irq); printk("unexpected IRQ trap at vector %02x\n", irq);
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
if (cpu_has_apic) { if (cpu_has_apic) {
__ack_APIC_irq(); __ack_APIC_irq();
} }
#endif #endif
} }
@ -1621,13 +1621,13 @@ int __rtai_hal_init (void)
return -1; return -1;
} }
for (trapnr = 0; trapnr < RTAI_NR_IRQS; trapnr++) { for (trapnr = 0; trapnr < RTAI_NR_IRQS; trapnr++) {
rtai_domain.irqs[trapnr].ackfn = (void *)hal_root_domain->irqs[trapnr].ackfn; rtai_domain.irqs[trapnr].ackfn = (void *)hal_root_domain->irqs[trapnr].ackfn;
} }
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
for (trapnr = 0; trapnr < num_online_cpus(); trapnr++) { for (trapnr = 0; trapnr < num_online_cpus(); trapnr++) {
ipipe_root_status[trapnr] = &hal_root_domain->cpudata[trapnr].status; ipipe_root_status[trapnr] = &hal_root_domain->cpudata[trapnr].status;
} }
#endif #endif
ipipe_virtualize_irq(hal_root_domain, rtai_sysreq_virq, (void *)rtai_lsrq_dispatcher, NULL, NULL, IPIPE_HANDLE_MASK); ipipe_virtualize_irq(hal_root_domain, rtai_sysreq_virq, (void *)rtai_lsrq_dispatcher, NULL, NULL, IPIPE_HANDLE_MASK);
@ -1692,9 +1692,9 @@ void __rtai_hal_exit (void)
for (trapnr = 0; trapnr < HAL_NR_FAULTS; trapnr++) { for (trapnr = 0; trapnr < HAL_NR_FAULTS; trapnr++) {
hal_catch_event(hal_root_domain, trapnr, NULL); hal_catch_event(hal_root_domain, trapnr, NULL);
} }
hal_virtualize_irq(hal_root_domain, rtai_sysreq_virq, NULL, NULL, 0); hal_virtualize_irq(hal_root_domain, rtai_sysreq_virq, NULL, NULL, 0);
hal_free_irq(rtai_sysreq_virq); hal_free_irq(rtai_sysreq_virq);
rtai_uninstall_archdep(); rtai_uninstall_archdep();
if (IsolCpusMask) { if (IsolCpusMask) {
for (trapnr = 0; trapnr < IPIPE_NR_XIRQS; trapnr++) { for (trapnr = 0; trapnr < IPIPE_NR_XIRQS; trapnr++) {
@ -1715,25 +1715,25 @@ module_exit(__rtai_hal_exit);
#define VSNPRINTF_BUF 256 #define VSNPRINTF_BUF 256
asmlinkage int rt_printk(const char *fmt, ...) asmlinkage int rt_printk(const char *fmt, ...)
{ {
char buf[VSNPRINTF_BUF]; char buf[VSNPRINTF_BUF];
va_list args; va_list args;
va_start(args, fmt); va_start(args, fmt);
vsnprintf(buf, VSNPRINTF_BUF, fmt, args); vsnprintf(buf, VSNPRINTF_BUF, fmt, args);
va_end(args); va_end(args);
return printk("%s", buf); return printk("%s", buf);
} }
asmlinkage int rt_sync_printk(const char *fmt, ...) asmlinkage int rt_sync_printk(const char *fmt, ...)
{ {
char buf[VSNPRINTF_BUF]; char buf[VSNPRINTF_BUF];
va_list args; va_list args;
va_start(args, fmt); va_start(args, fmt);
vsnprintf(buf, VSNPRINTF_BUF, fmt, args); vsnprintf(buf, VSNPRINTF_BUF, fmt, args);
va_end(args); va_end(args);
hal_set_printk_sync(&rtai_domain); hal_set_printk_sync(&rtai_domain);
return printk("%s", buf); return printk("%s", buf);
} }
EXPORT_SYMBOL(rtai_realtime_irq); EXPORT_SYMBOL(rtai_realtime_irq);
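Both wrappers above format into a fixed 256-byte stack buffer before calling printk(), and rt_sync_printk() first switches the RTAI domain to synchronous console output. A hedged example call (function name and message are arbitrary):

/* Illustrative only: printf-style usage from RTAI code. */
static void report_latency(int cpu, int worst_ns)
{
        rt_printk("RTAI[hal]: worst latency on CPU %d: %d ns\n", cpu, worst_ns);
}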
@ -1871,9 +1871,9 @@ static void rtai_release_tickdev(void)
static unsigned long hal_request_apic_freq(void) static unsigned long hal_request_apic_freq(void)
{ {
struct hal_sysinfo_struct sysinfo; struct hal_sysinfo_struct sysinfo;
hal_get_sysinfo(&sysinfo); hal_get_sysinfo(&sysinfo);
return sysinfo.sys_hrtimer_freq; return sysinfo.sys_hrtimer_freq;
#if 0 #if 0
unsigned long cpuid, avrg_freq, freq; unsigned long cpuid, avrg_freq, freq;
for (avrg_freq = freq = cpuid = 0; cpuid < num_online_cpus(); cpuid++) { for (avrg_freq = freq = cpuid = 0; cpuid < num_online_cpus(); cpuid++) {


@@ -5250,6 +5250,67 @@ index f04e25f..5fa6be6 100644
 select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE)
 select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE)
 help
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 79bbc21..f1805d4 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -5,6 +5,7 @@ config IOMMU_API
menuconfig IOMMU_SUPPORT
bool "IOMMU Hardware Support"
default y
+ depends on !IPIPE
---help---
Say Y here if you want to compile device drivers for IO Memory
Management Units into the kernel. These devices usually allow to
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 893503f..d0d856d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -28,7 +28,7 @@ config PCI_DEBUG
config PCI_REALLOC_ENABLE_AUTO
bool "Enable PCI resource re-allocation detection"
- depends on PCI
+ depends on PCI && PCI_IOV
help
Say Y here if you want the PCI core to detect if PCI resource
re-allocation needs to be enabled. You can always use pci=realloc=on
@@ -72,7 +72,7 @@ config PCI_ATS
config PCI_IOV
bool "PCI IOV support"
- depends on PCI
+ depends on PCI && !IPIPE
select PCI_ATS
help
I/O Virtualization is a PCI feature supported by some devices
@@ -83,7 +83,7 @@ config PCI_IOV
config PCI_PRI
bool "PCI PRI support"
- depends on PCI
+ depends on PCI && IOMMU_SUPPORT
select PCI_ATS
help
PRI is the PCI Page Request Interface. It allows PCI devices that are
@@ -93,7 +93,7 @@ config PCI_PRI
config PCI_PASID
bool "PCI PASID support"
- depends on PCI
+ depends on PCI && IOMMU_SUPPORT
select PCI_ATS
help
Process Address Space Identifiers (PASIDs) can be used by PCI devices
@@ -106,7 +106,7 @@ config PCI_PASID
config PCI_IOAPIC
bool "PCI IO-APIC hotplug support" if X86
- depends on PCI
+ depends on PCI && !IPIPE
depends on ACPI
depends on X86_IO_APIC
default !X86
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index 6e373ea..f32da7b 100644
--- a/drivers/pci/htirq.c


@@ -472,19 +472,19 @@ static void conf_usage(const char *progname)
         printf("Usage: %s [option] <kconfig-file>\n", progname);
         printf("[option] is _one_ of the following:\n");
         printf(" --listnewconfig List new options\n");
         printf(" --oldaskconfig Start a new configuration using a line-oriented program\n");
         printf(" --oldconfig Update a configuration using a provided .rtai_config as base\n");
         printf(" --silentoldconfig Same as oldconfig, but quietly, additionally update deps\n");
         printf(" --olddefconfig Same as silentoldconfig but sets new symbols to their default value\n");
         printf(" --oldnoconfig An alias of olddefconfig\n");
         printf(" --defconfig <file> New config with default defined in <file>\n");
         printf(" --savedefconfig <file> Save the minimal current configuration to <file>\n");
         printf(" --allnoconfig New config where all options are answered with no\n");
         printf(" --allyesconfig New config where all options are answered with yes\n");
         printf(" --allmodconfig New config where all options are answered with mod\n");
         printf(" --alldefconfig New config with all symbols set to default\n");
         printf(" --randconfig New config with random answer to all options\n");
 }
 int main(int ac, char **av)


@ -100,7 +100,7 @@ struct symbol {
#define SYMBOL_WARNED 0x8000 /* warning has been issued */ #define SYMBOL_WARNED 0x8000 /* warning has been issued */
/* Set when symbol.def[] is used */ /* Set when symbol.def[] is used */
#define SYMBOL_DEF 0x10000 /* First bit of SYMBOL_DEF */ #define SYMBOL_DEF 0x10000 /* First bit of SYMBOL_DEF */
#define SYMBOL_DEF_USER 0x10000 /* symbol.def[S_DEF_USER] is valid */ #define SYMBOL_DEF_USER 0x10000 /* symbol.def[S_DEF_USER] is valid */
#define SYMBOL_DEF_AUTO 0x20000 /* symbol.def[S_DEF_AUTO] is valid */ #define SYMBOL_DEF_AUTO 0x20000 /* symbol.def[S_DEF_AUTO] is valid */
#define SYMBOL_DEF3 0x40000 /* symbol.def[S_DEF_3] is valid */ #define SYMBOL_DEF3 0x40000 /* symbol.def[S_DEF_3] is valid */
@ -113,12 +113,12 @@ struct symbol {
* with a config "symbol". * with a config "symbol".
* Sample: * Sample:
* config FOO * config FOO
* default y * default y
* prompt "foo prompt" * prompt "foo prompt"
* select BAR * select BAR
* config BAZ * config BAZ
* int "BAZ Value" * int "BAZ Value"
* range 1..255 * range 1..255
*/ */
enum prop_type { enum prop_type {
P_UNKNOWN, P_UNKNOWN,
@ -135,16 +135,16 @@ enum prop_type {
struct property { struct property {
struct property *next; /* next property - null if last */ struct property *next; /* next property - null if last */
struct symbol *sym; /* the symbol for which the property is associated */ struct symbol *sym; /* the symbol for which the property is associated */
enum prop_type type; /* type of property */ enum prop_type type; /* type of property */
const char *text; /* the prompt value - P_PROMPT, P_MENU, P_COMMENT */ const char *text; /* the prompt value - P_PROMPT, P_MENU, P_COMMENT */
struct expr_value visible; struct expr_value visible;
struct expr *expr; /* the optional conditional part of the property */ struct expr *expr; /* the optional conditional part of the property */
struct menu *menu; /* the menu the property are associated with struct menu *menu; /* the menu the property are associated with
* valid for: P_SELECT, P_RANGE, P_CHOICE, * valid for: P_SELECT, P_RANGE, P_CHOICE,
* P_PROMPT, P_DEFAULT, P_MENU, P_COMMENT */ * P_PROMPT, P_DEFAULT, P_MENU, P_COMMENT */
struct file *file; /* what file was this property defined */ struct file *file; /* what file was this property defined */
int lineno; /* what lineno was this property defined */ int lineno; /* what lineno was this property defined */
}; };
#define for_all_properties(sym, st, tok) \ #define for_all_properties(sym, st, tok) \


@ -176,75 +176,75 @@ static const char *xpm_symbol_no[] = {
"12 12 2 1", "12 12 2 1",
" c white", " c white",
". c black", ". c black",
" ", " ",
" .......... ", " .......... ",
" . . ", " . . ",
" . . ", " . . ",
" . . ", " . . ",
" . . ", " . . ",
" . . ", " . . ",
" . . ", " . . ",
" . . ", " . . ",
" . . ", " . . ",
" .......... ", " .......... ",
" "}; " "};
static const char *xpm_symbol_mod[] = { static const char *xpm_symbol_mod[] = {
"12 12 2 1", "12 12 2 1",
" c white", " c white",
". c black", ". c black",
" ", " ",
" .......... ", " .......... ",
" . . ", " . . ",
" . . ", " . . ",
" . .. . ", " . .. . ",
" . .... . ", " . .... . ",
" . .... . ", " . .... . ",
" . .. . ", " . .. . ",
" . . ", " . . ",
" . . ", " . . ",
" .......... ", " .......... ",
" "}; " "};
static const char *xpm_symbol_yes[] = { static const char *xpm_symbol_yes[] = {
"12 12 2 1", "12 12 2 1",
" c white", " c white",
". c black", ". c black",
" ", " ",
" .......... ", " .......... ",
" . . ", " . . ",
" . . ", " . . ",
" . . . ", " . . . ",
" . .. . ", " . .. . ",
" . . .. . ", " . . .. . ",
" . .... . ", " . .... . ",
" . .. . ", " . .. . ",
" . . ", " . . ",
" .......... ", " .......... ",
" "}; " "};
static const char *xpm_choice_no[] = { static const char *xpm_choice_no[] = {
"12 12 2 1", "12 12 2 1",
" c white", " c white",
". c black", ". c black",
" ", " ",
" .... ", " .... ",
" .. .. ", " .. .. ",
" . . ", " . . ",
" . . ", " . . ",
" . . ", " . . ",
" . . ", " . . ",
" . . ", " . . ",
" . . ", " . . ",
" .. .. ", " .. .. ",
" .... ", " .... ",
" "}; " "};
static const char *xpm_choice_yes[] = { static const char *xpm_choice_yes[] = {
"12 12 2 1", "12 12 2 1",
" c white", " c white",
". c black", ". c black",
" ", " ",
" .... ", " .... ",
" .. .. ", " .. .. ",
" . . ", " . . ",
@ -255,30 +255,30 @@ static const char *xpm_choice_yes[] = {
" . . ", " . . ",
" .. .. ", " .. .. ",
" .... ", " .... ",
" "}; " "};
static const char *xpm_menu[] = { static const char *xpm_menu[] = {
"12 12 2 1", "12 12 2 1",
" c white", " c white",
". c black", ". c black",
" ", " ",
" .......... ", " .......... ",
" . . ", " . . ",
" . .. . ", " . .. . ",
" . .... . ", " . .... . ",
" . ...... . ", " . ...... . ",
" . ...... . ", " . ...... . ",
" . .... . ", " . .... . ",
" . .. . ", " . .. . ",
" . . ", " . . ",
" .......... ", " .......... ",
" "}; " "};
static const char *xpm_menu_inv[] = { static const char *xpm_menu_inv[] = {
"12 12 2 1", "12 12 2 1",
" c white", " c white",
". c black", ". c black",
" ", " ",
" .......... ", " .......... ",
" .......... ", " .......... ",
" .. ...... ", " .. ...... ",
@ -289,38 +289,38 @@ static const char *xpm_menu_inv[] = {
" .. ...... ", " .. ...... ",
" .......... ", " .......... ",
" .......... ", " .......... ",
" "}; " "};
static const char *xpm_menuback[] = { static const char *xpm_menuback[] = {
"12 12 2 1", "12 12 2 1",
" c white", " c white",
". c black", ". c black",
" ", " ",
" .......... ", " .......... ",
" . . ", " . . ",
" . .. . ", " . .. . ",
" . .... . ", " . .... . ",
" . ...... . ", " . ...... . ",
" . ...... . ", " . ...... . ",
" . .... . ", " . .... . ",
" . .. . ", " . .. . ",
" . . ", " . . ",
" .......... ", " .......... ",
" "}; " "};
static const char *xpm_void[] = { static const char *xpm_void[] = {
"12 12 2 1", "12 12 2 1",
" c white", " c white",
". c black", ". c black",
" ", " ",
" ", " ",
" ", " ",
" ", " ",
" ", " ",
" ", " ",
" ", " ",
" ", " ",
" ", " ",
" ", " ",
" ", " ",
" "}; " "};


@@ -10,12 +10,12 @@
 /**
  * container_of - cast a member of a structure out to the containing structure
  * @ptr: the pointer to the member.
  * @type: the type of the container struct this is embedded in.
  * @member: the name of the member within the struct.
  *
  */
 #define container_of(ptr, type, member) ({ \
         const typeof( ((type *)0)->member ) *__mptr = (ptr); \
         (type *)( (char *)__mptr - offsetof(type,member) );})
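Since the kernel-doc above documents the macro's three arguments, a short usage sketch may help; the structure and field names are invented, and list_head stands for whatever list type this header declares:

/* Example only: recover the enclosing object from a pointer to one of its
 * members, exactly as documented above. */
struct demo_entry {
        int value;
        struct list_head node;
};

static inline struct demo_entry *demo_from_node(struct list_head *p)
{
        return container_of(p, struct demo_entry, node);
}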


@ -27,27 +27,27 @@
* *) A bugfix for the Page-Down problem * *) A bugfix for the Page-Down problem
* *
* *) Formerly when I used Page Down and Page Up, the cursor would be set * *) Formerly when I used Page Down and Page Up, the cursor would be set
* to the first position in the menu box. Now lxdialog is a bit * to the first position in the menu box. Now lxdialog is a bit
* smarter and works more like other menu systems (just have a look at * smarter and works more like other menu systems (just have a look at
* it). * it).
* *
* *) Formerly if I selected something my scrolling would be broken because * *) Formerly if I selected something my scrolling would be broken because
* lxdialog is re-invoked by the Menuconfig shell script, can't * lxdialog is re-invoked by the Menuconfig shell script, can't
* remember the last scrolling position, and just sets it so that the * remember the last scrolling position, and just sets it so that the
* cursor is at the bottom of the box. Now it writes the temporary file * cursor is at the bottom of the box. Now it writes the temporary file
* lxdialog.scrltmp which contains this information. The file is * lxdialog.scrltmp which contains this information. The file is
* deleted by lxdialog if the user leaves a submenu or enters a new * deleted by lxdialog if the user leaves a submenu or enters a new
* one, but it would be nice if Menuconfig could make another "rm -f" * one, but it would be nice if Menuconfig could make another "rm -f"
* just to be sure. Just try it out - you will recognise a difference! * just to be sure. Just try it out - you will recognise a difference!
* *
* [ 1998-06-14 ] * [ 1998-06-14 ]
* *
* *) Now lxdialog is crash-safe against broken "lxdialog.scrltmp" files * *) Now lxdialog is crash-safe against broken "lxdialog.scrltmp" files
* and menus change their size on the fly. * and menus change their size on the fly.
* *
* *) If for some reason the last scrolling position is not saved by * *) If for some reason the last scrolling position is not saved by
* lxdialog, it sets the scrolling so that the selected item is in the * lxdialog, it sets the scrolling so that the selected item is in the
* middle of the menu box, not at the bottom. * middle of the menu box, not at the bottom.
* *
* 02 January 1999, Michael Elizabeth Chastain (mec@shout.net) * 02 January 1999, Michael Elizabeth Chastain (mec@shout.net)
* Reset 'scroll' to 0 if the value from lxdialog.scrltmp is bogus. * Reset 'scroll' to 0 if the value from lxdialog.scrltmp is bogus.


@ -62,7 +62,7 @@ static void set_mono_theme(void)
} }
#define DLG_COLOR(dialog, f, b, h) \ #define DLG_COLOR(dialog, f, b, h) \
do { \ do { \
dlg.dialog.fg = (f); \ dlg.dialog.fg = (f); \
dlg.dialog.bg = (b); \ dlg.dialog.bg = (b); \
dlg.dialog.hl = (h); \ dlg.dialog.hl = (h); \
@ -70,35 +70,35 @@ do { \
static void set_classic_theme(void) static void set_classic_theme(void)
{ {
DLG_COLOR(screen, COLOR_CYAN, COLOR_BLUE, true); DLG_COLOR(screen, COLOR_CYAN, COLOR_BLUE, true);
DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, true); DLG_COLOR(shadow, COLOR_BLACK, COLOR_BLACK, true);
DLG_COLOR(dialog, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(dialog, COLOR_BLACK, COLOR_WHITE, false);
DLG_COLOR(title, COLOR_YELLOW, COLOR_WHITE, true); DLG_COLOR(title, COLOR_YELLOW, COLOR_WHITE, true);
DLG_COLOR(border, COLOR_WHITE, COLOR_WHITE, true); DLG_COLOR(border, COLOR_WHITE, COLOR_WHITE, true);
DLG_COLOR(button_active, COLOR_WHITE, COLOR_BLUE, true); DLG_COLOR(button_active, COLOR_WHITE, COLOR_BLUE, true);
DLG_COLOR(button_inactive, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(button_inactive, COLOR_BLACK, COLOR_WHITE, false);
DLG_COLOR(button_key_active, COLOR_WHITE, COLOR_BLUE, true); DLG_COLOR(button_key_active, COLOR_WHITE, COLOR_BLUE, true);
DLG_COLOR(button_key_inactive, COLOR_RED, COLOR_WHITE, false); DLG_COLOR(button_key_inactive, COLOR_RED, COLOR_WHITE, false);
DLG_COLOR(button_label_active, COLOR_YELLOW, COLOR_BLUE, true); DLG_COLOR(button_label_active, COLOR_YELLOW, COLOR_BLUE, true);
DLG_COLOR(button_label_inactive, COLOR_BLACK, COLOR_WHITE, true); DLG_COLOR(button_label_inactive, COLOR_BLACK, COLOR_WHITE, true);
DLG_COLOR(inputbox, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(inputbox, COLOR_BLACK, COLOR_WHITE, false);
DLG_COLOR(inputbox_border, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(inputbox_border, COLOR_BLACK, COLOR_WHITE, false);
DLG_COLOR(searchbox, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(searchbox, COLOR_BLACK, COLOR_WHITE, false);
DLG_COLOR(searchbox_title, COLOR_YELLOW, COLOR_WHITE, true); DLG_COLOR(searchbox_title, COLOR_YELLOW, COLOR_WHITE, true);
DLG_COLOR(searchbox_border, COLOR_WHITE, COLOR_WHITE, true); DLG_COLOR(searchbox_border, COLOR_WHITE, COLOR_WHITE, true);
DLG_COLOR(position_indicator, COLOR_YELLOW, COLOR_WHITE, true); DLG_COLOR(position_indicator, COLOR_YELLOW, COLOR_WHITE, true);
DLG_COLOR(menubox, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(menubox, COLOR_BLACK, COLOR_WHITE, false);
DLG_COLOR(menubox_border, COLOR_WHITE, COLOR_WHITE, true); DLG_COLOR(menubox_border, COLOR_WHITE, COLOR_WHITE, true);
DLG_COLOR(item, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(item, COLOR_BLACK, COLOR_WHITE, false);
DLG_COLOR(item_selected, COLOR_WHITE, COLOR_BLUE, true); DLG_COLOR(item_selected, COLOR_WHITE, COLOR_BLUE, true);
DLG_COLOR(tag, COLOR_YELLOW, COLOR_WHITE, true); DLG_COLOR(tag, COLOR_YELLOW, COLOR_WHITE, true);
DLG_COLOR(tag_selected, COLOR_YELLOW, COLOR_BLUE, true); DLG_COLOR(tag_selected, COLOR_YELLOW, COLOR_BLUE, true);
DLG_COLOR(tag_key, COLOR_YELLOW, COLOR_WHITE, true); DLG_COLOR(tag_key, COLOR_YELLOW, COLOR_WHITE, true);
DLG_COLOR(tag_key_selected, COLOR_YELLOW, COLOR_BLUE, true); DLG_COLOR(tag_key_selected, COLOR_YELLOW, COLOR_BLUE, true);
DLG_COLOR(check, COLOR_BLACK, COLOR_WHITE, false); DLG_COLOR(check, COLOR_BLACK, COLOR_WHITE, false);
DLG_COLOR(check_selected, COLOR_WHITE, COLOR_BLUE, true); DLG_COLOR(check_selected, COLOR_WHITE, COLOR_BLUE, true);
DLG_COLOR(uarrow, COLOR_GREEN, COLOR_WHITE, true); DLG_COLOR(uarrow, COLOR_GREEN, COLOR_WHITE, true);
DLG_COLOR(darrow, COLOR_GREEN, COLOR_WHITE, true); DLG_COLOR(darrow, COLOR_GREEN, COLOR_WHITE, true);
} }
static void set_blackbg_theme(void) static void set_blackbg_theme(void)
@ -109,34 +109,34 @@ static void set_blackbg_theme(void)
DLG_COLOR(title, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(title, COLOR_RED, COLOR_BLACK, false);
DLG_COLOR(border, COLOR_BLACK, COLOR_BLACK, true); DLG_COLOR(border, COLOR_BLACK, COLOR_BLACK, true);
DLG_COLOR(button_active, COLOR_YELLOW, COLOR_RED, false); DLG_COLOR(button_active, COLOR_YELLOW, COLOR_RED, false);
DLG_COLOR(button_inactive, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(button_inactive, COLOR_YELLOW, COLOR_BLACK, false);
DLG_COLOR(button_key_active, COLOR_YELLOW, COLOR_RED, true); DLG_COLOR(button_key_active, COLOR_YELLOW, COLOR_RED, true);
DLG_COLOR(button_key_inactive, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(button_key_inactive, COLOR_RED, COLOR_BLACK, false);
DLG_COLOR(button_label_active, COLOR_WHITE, COLOR_RED, false); DLG_COLOR(button_label_active, COLOR_WHITE, COLOR_RED, false);
DLG_COLOR(button_label_inactive, COLOR_BLACK, COLOR_BLACK, true); DLG_COLOR(button_label_inactive, COLOR_BLACK, COLOR_BLACK, true);
DLG_COLOR(inputbox, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(inputbox, COLOR_YELLOW, COLOR_BLACK, false);
DLG_COLOR(inputbox_border, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(inputbox_border, COLOR_YELLOW, COLOR_BLACK, false);
DLG_COLOR(searchbox, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(searchbox, COLOR_YELLOW, COLOR_BLACK, false);
DLG_COLOR(searchbox_title, COLOR_YELLOW, COLOR_BLACK, true); DLG_COLOR(searchbox_title, COLOR_YELLOW, COLOR_BLACK, true);
DLG_COLOR(searchbox_border, COLOR_BLACK, COLOR_BLACK, true); DLG_COLOR(searchbox_border, COLOR_BLACK, COLOR_BLACK, true);
DLG_COLOR(position_indicator, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(position_indicator, COLOR_RED, COLOR_BLACK, false);
DLG_COLOR(menubox, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(menubox, COLOR_YELLOW, COLOR_BLACK, false);
DLG_COLOR(menubox_border, COLOR_BLACK, COLOR_BLACK, true); DLG_COLOR(menubox_border, COLOR_BLACK, COLOR_BLACK, true);
DLG_COLOR(item, COLOR_WHITE, COLOR_BLACK, false); DLG_COLOR(item, COLOR_WHITE, COLOR_BLACK, false);
DLG_COLOR(item_selected, COLOR_WHITE, COLOR_RED, false); DLG_COLOR(item_selected, COLOR_WHITE, COLOR_RED, false);
DLG_COLOR(tag, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(tag, COLOR_RED, COLOR_BLACK, false);
DLG_COLOR(tag_selected, COLOR_YELLOW, COLOR_RED, true); DLG_COLOR(tag_selected, COLOR_YELLOW, COLOR_RED, true);
DLG_COLOR(tag_key, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(tag_key, COLOR_RED, COLOR_BLACK, false);
DLG_COLOR(tag_key_selected, COLOR_YELLOW, COLOR_RED, true); DLG_COLOR(tag_key_selected, COLOR_YELLOW, COLOR_RED, true);
DLG_COLOR(check, COLOR_YELLOW, COLOR_BLACK, false); DLG_COLOR(check, COLOR_YELLOW, COLOR_BLACK, false);
DLG_COLOR(check_selected, COLOR_YELLOW, COLOR_RED, true); DLG_COLOR(check_selected, COLOR_YELLOW, COLOR_RED, true);
DLG_COLOR(uarrow, COLOR_RED, COLOR_BLACK, false); DLG_COLOR(uarrow, COLOR_RED, COLOR_BLACK, false);
@ -146,13 +146,13 @@ static void set_blackbg_theme(void)
static void set_bluetitle_theme(void) static void set_bluetitle_theme(void)
{ {
set_classic_theme(); set_classic_theme();
DLG_COLOR(title, COLOR_BLUE, COLOR_WHITE, true); DLG_COLOR(title, COLOR_BLUE, COLOR_WHITE, true);
DLG_COLOR(button_key_active, COLOR_YELLOW, COLOR_BLUE, true); DLG_COLOR(button_key_active, COLOR_YELLOW, COLOR_BLUE, true);
DLG_COLOR(button_label_active, COLOR_WHITE, COLOR_BLUE, true); DLG_COLOR(button_label_active, COLOR_WHITE, COLOR_BLUE, true);
DLG_COLOR(searchbox_title, COLOR_BLUE, COLOR_WHITE, true); DLG_COLOR(searchbox_title, COLOR_BLUE, COLOR_WHITE, true);
DLG_COLOR(position_indicator, COLOR_BLUE, COLOR_WHITE, true); DLG_COLOR(position_indicator, COLOR_BLUE, COLOR_WHITE, true);
DLG_COLOR(tag, COLOR_BLUE, COLOR_WHITE, true); DLG_COLOR(tag, COLOR_BLUE, COLOR_WHITE, true);
DLG_COLOR(tag_key, COLOR_BLUE, COLOR_WHITE, true); DLG_COLOR(tag_key, COLOR_BLUE, COLOR_WHITE, true);
} }


@ -51,8 +51,8 @@ static const char mconf_readme[] = N_(
" Submenus are designated by \"--->\".\n" " Submenus are designated by \"--->\".\n"
"\n" "\n"
" Shortcut: Press the option's highlighted letter (hotkey).\n" " Shortcut: Press the option's highlighted letter (hotkey).\n"
" Pressing a hotkey more than once will sequence\n" " Pressing a hotkey more than once will sequence\n"
" through all visible items which use that hotkey.\n" " through all visible items which use that hotkey.\n"
"\n" "\n"
" You may also use the <PAGE UP> and <PAGE DOWN> keys to scroll\n" " You may also use the <PAGE UP> and <PAGE DOWN> keys to scroll\n"
" unseen options into view.\n" " unseen options into view.\n"
@ -61,8 +61,8 @@ static const char mconf_readme[] = N_(
" and press <ENTER>.\n" " and press <ENTER>.\n"
"\n" "\n"
" Shortcut: Press <ESC><ESC> or <E> or <X> if there is no hotkey\n" " Shortcut: Press <ESC><ESC> or <E> or <X> if there is no hotkey\n"
" using those letters. You may press a single <ESC>, but\n" " using those letters. You may press a single <ESC>, but\n"
" there is a delayed response which you may find annoying.\n" " there is a delayed response which you may find annoying.\n"
"\n" "\n"
" Also, the <TAB> and cursor keys will cycle between <Select>,\n" " Also, the <TAB> and cursor keys will cycle between <Select>,\n"
" <Exit> and <Help>.\n" " <Exit> and <Help>.\n"
@ -81,7 +81,7 @@ static const char mconf_readme[] = N_(
" <S> or the <SPACE BAR>.\n" " <S> or the <SPACE BAR>.\n"
"\n" "\n"
" Shortcut: Press the first letter of the option you wish to set then\n" " Shortcut: Press the first letter of the option you wish to set then\n"
" press <S> or <SPACE BAR>.\n" " press <S> or <SPACE BAR>.\n"
"\n" "\n"
"o To see available help for the item, use the cursor keys to highlight\n" "o To see available help for the item, use the cursor keys to highlight\n"
" <Help> and Press <ENTER>.\n" " <Help> and Press <ENTER>.\n"
@ -271,8 +271,8 @@ search_help[] = N_(
"\n\n" "\n\n"
"Search examples:\n" "Search examples:\n"
"Examples: USB => find all symbols containing USB\n" "Examples: USB => find all symbols containing USB\n"
" ^USB => find all symbols starting with USB\n" " ^USB => find all symbols starting with USB\n"
" USB$ => find all symbols ending with USB\n" " USB$ => find all symbols ending with USB\n"
"\n"); "\n");
static int indent; static int indent;


@ -50,27 +50,27 @@ static const char nconf_global_help[] = N_(
"\n" "\n"
"Menu navigation keys\n" "Menu navigation keys\n"
"----------------------------------------------------------------------\n" "----------------------------------------------------------------------\n"
"Linewise up <Up>\n" "Linewise up <Up>\n"
"Linewise down <Down>\n" "Linewise down <Down>\n"
"Pagewise up <Page Up>\n" "Pagewise up <Page Up>\n"
"Pagewise down <Page Down>\n" "Pagewise down <Page Down>\n"
"First entry <Home>\n" "First entry <Home>\n"
"Last entry <End>\n" "Last entry <End>\n"
"Enter a submenu <Right> <Enter>\n" "Enter a submenu <Right> <Enter>\n"
"Go back to parent menu <Left> <Esc> <F5>\n" "Go back to parent menu <Left> <Esc> <F5>\n"
"Close a help window <Enter> <Esc> <F5>\n" "Close a help window <Enter> <Esc> <F5>\n"
"Close entry window, apply <Enter>\n" "Close entry window, apply <Enter>\n"
"Close entry window, forget <Esc> <F5>\n" "Close entry window, forget <Esc> <F5>\n"
"Start incremental, case-insensitive search for STRING in menu entries,\n" "Start incremental, case-insensitive search for STRING in menu entries,\n"
" no regex support, STRING is displayed in upper left corner\n" " no regex support, STRING is displayed in upper left corner\n"
" </>STRING\n" " </>STRING\n"
" Remove last character <Backspace>\n" " Remove last character <Backspace>\n"
" Jump to next hit <Down>\n" " Jump to next hit <Down>\n"
" Jump to previous hit <Up>\n" " Jump to previous hit <Up>\n"
"Exit menu search mode </> <Esc>\n" "Exit menu search mode </> <Esc>\n"
"Search for configuration variables with or without leading CONFIG_\n" "Search for configuration variables with or without leading CONFIG_\n"
" <F8>RegExpr<Enter>\n" " <F8>RegExpr<Enter>\n"
"Verbose search help <F8><F1>\n" "Verbose search help <F8><F1>\n"
"----------------------------------------------------------------------\n" "----------------------------------------------------------------------\n"
"\n" "\n"
"Unless in a data entry window, key <1> may be used instead of <F1>,\n" "Unless in a data entry window, key <1> may be used instead of <F1>,\n"


@@ -52,15 +52,15 @@ static void set_normal_colors(void)
 }
 /* available attributes:
    A_NORMAL      Normal display (no highlight)
    A_STANDOUT    Best highlighting mode of the terminal.
    A_UNDERLINE   Underlining
    A_REVERSE     Reverse video
    A_BLINK       Blinking
    A_DIM         Half bright
    A_BOLD        Extra bright or bold
    A_PROTECT     Protected mode
    A_INVIS       Invisible or blank mode
    A_ALTCHARSET  Alternate character set
    A_CHARTEXT    Bit-mask to extract a character
    COLOR_PAIR(n) Color-pair number n
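The comment above lists the ncurses attributes that can be OR-ed with a color pair; a hedged illustration of applying such a combination to a window (the pair number and window handle are arbitrary):

/* Illustrative only: highlight a string using attributes from the list above. */
#include <ncurses.h>

static void draw_selected(WINDOW *win, const char *text)
{
        wattrset(win, COLOR_PAIR(1) | A_BOLD | A_REVERSE);
        waddstr(win, text);
        wattrset(win, A_NORMAL);
}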


@ -11,17 +11,17 @@
#include <qsettings.h> #include <qsettings.h>
#if QT_VERSION < 0x040000 #if QT_VERSION < 0x040000
#define Q3ValueList QValueList #define Q3ValueList QValueList
#define Q3PopupMenu QPopupMenu #define Q3PopupMenu QPopupMenu
#define Q3ListView QListView #define Q3ListView QListView
#define Q3ListViewItem QListViewItem #define Q3ListViewItem QListViewItem
#define Q3VBox QVBox #define Q3VBox QVBox
#define Q3TextBrowser QTextBrowser #define Q3TextBrowser QTextBrowser
#define Q3MainWindow QMainWindow #define Q3MainWindow QMainWindow
#define Q3Action QAction #define Q3Action QAction
#define Q3ToolBar QToolBar #define Q3ToolBar QToolBar
#define Q3ListViewItemIterator QListViewItemIterator #define Q3ListViewItemIterator QListViewItemIterator
#define Q3FileDialog QFileDialog #define Q3FileDialog QFileDialog
#endif #endif
class ConfigView; class ConfigView;


@@ -3,7 +3,6 @@ ARCHDIR = asm-x86
 nobase_include_HEADERS = \
         rtai.h \
         rtai_hal_names.h \
-        rtai_bits.h \
         rtai_fifos.h \
         rtai_lxrt.h \
         rtai_malloc.h \
@@ -30,7 +29,6 @@ nobase_include_HEADERS = \
         rtai_signal.h \
         rtai_spl.h \
         rtai_tasklets.h \
-        rtai_tbx.h \
         rtai_trace.h \
         rtai_types.h \
         rtai_version.h \
@@ -41,7 +39,7 @@ install-data-local:
 if CONFIG_RTAI_OLD_FASHIONED_BUILD
 distclean-local:
-        rm -f $(DESTDIR)$(includedir)/asm $(DESTDIR)$(includedir)/rtai_config.h
+        rm -f $(srcdir)$(includedir)/asm $(srcdir)$(includedir)/rtai_config.h
 endif
 SUBDIRS = $(ARCHDIR)


@@ -28,7 +28,7 @@
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#endif
@@ -56,7 +56,7 @@ struct __rtai_xchg_dummy { unsigned long a[100]; };
static inline unsigned long atomic_xchg(volatile void *ptr, unsigned long x)
{
__asm__ __volatile__(LOCK_PREFIX "xchgl %0,%1"
:"=r" (x)
:"m" (*__rtai_xg(ptr)), "0" (x)
:"memory");
return x;
@@ -74,21 +74,21 @@ static inline unsigned long atomic_cmpxchg(volatile void *ptr, unsigned long o,
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
LOCK_PREFIX "decl %0; sete %1"
:"=m" (*__rtai_xg(v)), "=qm" (c)
:"m" (*__rtai_xg(v)) : "memory");
return c != 0;
}
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "incl %0"
:"=m" (*__rtai_xg(v))
:"m" (*__rtai_xg(v)));
}
/* Depollute the namespace a bit. */
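For orientation (not part of the diff): a minimal sketch of how the atomic helpers above are typically used for reference counting; my_refcount, my_get(), my_put() and free_my_object() are hypothetical names.

static atomic_t my_refcount = ATOMIC_INIT(1);

static void my_get(void)
{
	atomic_inc(&my_refcount);              /* LOCK "incl" on the counter */
}

static void my_put(void)
{
	if (atomic_dec_and_test(&my_refcount)) /* LOCK "decl; sete": true when it reaches zero */
		free_my_object();              /* last user gone, release the resource */
}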

View file

@@ -28,7 +28,7 @@
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
#define atomic_xchg(ptr, v) xchg(ptr, v)
#define atomic_cmpxchg(ptr, o, n) cmpxchg((unsigned long *)(ptr), o, n)
#endif
@@ -71,21 +71,21 @@ static inline unsigned long atomic_cmpxchg (volatile void *ptr, unsigned long o,
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
LOCK_PREFIX "decl %0; sete %1"
:"=m" (v->counter), "=qm" (c)
:"m" (v->counter) : "memory");
return c != 0;
}
static __inline__ void atomic_inc(atomic_t *v)
{
__asm__ __volatile__(
LOCK_PREFIX "incl %0"
:"=m" (v->counter)
:"m" (v->counter));
}
/* Depollute the namespace a bit. */

View file

@@ -34,10 +34,10 @@
#undef SETUP_8254_TSC_EMULATION
#undef CLEAR_8254_TSC_EMULATION
#define RTAI_CPU_FREQ RTAI_FREQ_8254
#define RTAI_CALIBRATED_CPU_FREQ RTAI_FREQ_8254
#define rtai_rdtsc() rd_8254_ts()
#define rdtsc() rd_8254_ts()
#define TICK_8254_TSC_EMULATION() rd_8254_ts()

View file

@@ -32,20 +32,10 @@
#include <asm/processor.h>
#endif /* !__cplusplus */
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,25)
-typedef union i387_union FPU_ENV;
-#define TASK_FPENV(tsk) (&(tsk)->thread.i387)
-#elif LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)
-typedef union thread_xstate FPU_ENV;
-#define TASK_FPENV(tsk) ((tsk)->thread.xstate)
-#else
typedef union thread_xstate FPU_ENV;
#define TASK_FPENV(tsk) ((tsk)->thread.fpu.state)
-#endif
-#ifdef CONFIG_RTAI_FPU_SUPPORT
+/* RAW FPU MANAGEMENT FOR USAGE FROM WHAT/WHEREVER RTAI DOES IN KERNEL */
-// RAW FPU MANAGEMENT FOR USAGE FROM WHAT/WHEREVER RTAI DOES IN KERNEL
#define enable_fpu() do { \
__asm__ __volatile__ ("clts"); \
@@ -62,7 +52,7 @@ typedef union thread_xstate FPU_ENV;
} \
} while (0)
-// initialise the hard fpu unit directly
+/* initialise the hard fpu unit directly */
#define init_hard_fpenv() do { \
__asm__ __volatile__ ("clts; fninit"); \
if (cpu_has_xmm) { \
@@ -71,7 +61,7 @@ typedef union thread_xstate FPU_ENV;
} \
} while (0)
-// initialise the given fpenv union, without touching the related hard fpu unit
+/* initialise the given fpenv union, without touching the related hard fpu unit */
#define __init_fpenv(fpenv) do { \
if (cpu_has_fxsr) { \
memset(&(fpenv)->fxsave, 0, sizeof(struct i387_fxsave_struct));\
@@ -104,14 +94,14 @@ typedef union thread_xstate FPU_ENV;
} \
} while (0)
-// FPU MANAGEMENT DRESSED FOR IN KTHREAD/THREAD/PROCESS FPU USAGE FROM RTAI
+/* FPU MANAGEMENT DRESSED FOR IN KTHREAD/THREAD/PROCESS FPU USAGE FROM RTAI */
-// Macros used for RTAI own kernel space tasks, where it uses the FPU env union
+/* Macros used for RTAI own kernel space tasks, where it uses the FPU env union */
#define init_fpenv(fpenv) do { __init_fpenv(&(fpenv)); } while (0)
#define save_fpenv(fpenv) do { __save_fpenv(&(fpenv)); } while (0)
#define restore_fpenv(fpenv) do { __restore_fpenv(&(fpenv)); } while (0)
-// Macros used for user space, where Linux might use eother a pointer or the FPU env union
+/* Macros used for user space, where Linux might use eother a pointer or the FPU env union */
#define init_hard_fpu(lnxtsk) do { \
init_hard_fpenv(); \
set_lnxtsk_uses_fpu(lnxtsk); \
@@ -129,73 +119,18 @@ typedef union thread_xstate FPU_ENV;
set_lnxtsk_using_fpu(lnxtsk); \
} while (0)
-#else /* !CONFIG_RTAI_FPU_SUPPORT */
-#define enable_fpu()
-#define save_fpcr_and_enable_fpu(fpcr)
-#define restore_fpcr(fpcr)
-#define init_hard_fpenv()
-#define init_fpenv(fpenv)
-#define save_fpenv(fpenv)
-#define restore_fpenv(fpenv)
-#define init_hard_fpu(lnxtsk)
-#define init_fpu(lnxtsk)
-#define restore_fpu(lnxtsk)
-#endif /* CONFIG_RTAI_FPU_SUPPORT */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-#define set_lnxtsk_uses_fpu(lnxtsk) \
-do { (lnxtsk)->used_math = 1; } while(0)
-#define clear_lnxtsk_uses_fpu(lnxtsk) \
-do { (lnxtsk)->used_math = 0; } while(0)
-#define lnxtsk_uses_fpu(lnxtsk) ((lnxtsk)->used_math)
-#define set_lnxtsk_using_fpu(lnxtsk) \
-do { (lnxtsk)->flags |= PF_USEDFPU; } while(0)
-#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
-#define set_lnxtsk_uses_fpu(lnxtsk) \
-do { (lnxtsk)->used_math = 1; } while(0)
-#define clear_lnxtsk_uses_fpu(lnxtsk) \
-do { (lnxtsk)->used_math = 0; } while(0)
-#define lnxtsk_uses_fpu(lnxtsk) ((lnxtsk)->used_math)
-#define set_lnxtsk_using_fpu(lnxtsk) \
-do { task_thread_info(lnxtsk)->status |= TS_USEDFPU; } while(0)
-// do { (lnxtsk)->thread_info->status |= TS_USEDFPU; } while(0)
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
#define set_lnxtsk_uses_fpu(lnxtsk) \
do { set_stopped_child_used_math(lnxtsk); } while(0)
#define clear_lnxtsk_uses_fpu(lnxtsk) \
do { clear_stopped_child_used_math(lnxtsk); } while(0)
#define lnxtsk_uses_fpu(lnxtsk) (tsk_used_math(lnxtsk))
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
#undef init_fpu
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#define rtai_set_fpu_used(lnxtsk) __thread_set_has_fpu(lnxtsk)
-#else
-#define rtai_set_fpu_used(lnxtsk) do { task_thread_info(lnxtsk)->status |= TS_USEDFPU; } while(0)
-#endif
#define set_lnxtsk_using_fpu(lnxtsk) \
do { rtai_set_fpu_used(lnxtsk); } while(0)
-// do { (lnxtsk)->thread_info->status |= TS_USEDFPU; } while(0)
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) */
#endif /* !_RTAI_ASM_I386_FPU_H */

View file

@@ -34,20 +34,10 @@
#include <asm/processor.h>
#endif /* !__cplusplus */
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,25)
-typedef union i387_union FPU_ENV;
-#define TASK_FPENV(tsk) (&(tsk)->thread.i387.fxsave)
-#elif LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,34)
-typedef union thread_xstate FPU_ENV;
-#define TASK_FPENV(tsk) (&(tsk)->thread.xstate->fxsave)
-#else
typedef union thread_xstate FPU_ENV;
#define TASK_FPENV(tsk) (&(tsk)->thread.fpu.state->fxsave)
-#endif
-#ifdef CONFIG_RTAI_FPU_SUPPORT
+/* RAW FPU MANAGEMENT FOR USAGE FROM WHAT/WHEREVER RTAI DOES IN KERNEL */
-// RAW FPU MANAGEMENT FOR USAGE FROM WHAT/WHEREVER RTAI DOES IN KERNEL
#define enable_fpu() do { \
__asm__ __volatile__ ("clts"); \
@@ -68,7 +58,7 @@ typedef union thread_xstate FPU_ENV;
} \
} while (0)
-// initialise the hard fpu unit directly
+/* initialise the hard fpu unit directly */
#define init_hard_fpenv() do { \
unsigned long __mxcsr; \
__asm__ __volatile__ ("clts; fninit"); \
@@ -76,7 +66,7 @@ typedef union thread_xstate FPU_ENV;
__asm__ __volatile__ ("ldmxcsr %0": : "m" (__mxcsr)); \
} while (0)
-// initialise the given fpenv union, without touching the related hard fpu unit
+/* initialise the given fpenv union, without touching the related hard fpu unit */
#define __init_fpenv(fpenv) do { \
memset(fpenv, 0, sizeof(struct i387_fxsave_struct)); \
(fpenv)->cwd = 0x37f; \
@@ -84,7 +74,6 @@ typedef union thread_xstate FPU_ENV;
} while (0)
/* taken from Linux i387.h */
static inline int __save_fpenv(struct i387_fxsave_struct __user *fx)
{
int err;
@@ -129,7 +118,7 @@ static inline int __restore_fpenv(struct i387_fxsave_struct *fx)
#define save_fpenv(fpenv) do { __save_fpenv(&(fpenv).fxsave); } while (0)
#define restore_fpenv(fpenv) do { __restore_fpenv(&(fpenv).fxsave); } while (0)
-// FPU MANAGEMENT DRESSED FOR IN KTHREAD/THREAD/PROCESS FPU USAGE FROM RTAI
+/* FPU MANAGEMENT DRESSED FOR IN KTHREAD/THREAD/PROCESS FPU USAGE FROM RTAI */
#define init_hard_fpu(lnxtsk) do { \
init_hard_fpenv(); \
@@ -148,58 +137,18 @@ static inline int __restore_fpenv(struct i387_fxsave_struct *fx)
set_lnxtsk_using_fpu(lnxtsk); \
} while (0)
-#else /* !CONFIG_RTAI_FPU_SUPPORT */
-#define enable_fpu()
-#define save_fpcr_and_enable_fpu(fpcr)
-#define restore_fpcr(fpcr)
-#define init_hard_fpenv()
-#define init_fpenv(fpenv)
-#define save_fpenv(fpenv)
-#define restore_fpenv(fpenv)
-#define init_hard_fpu(lnxtsk)
-#define init_fpu(lnxtsk)
-#define restore_fpu(lnxtsk)
-#endif /* CONFIG_RTAI_FPU_SUPPORT */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
#define set_lnxtsk_uses_fpu(lnxtsk) \
-do { (lnxtsk)->used_math = 1; } while(0)
+do { set_stopped_child_used_math(lnxtsk); } while(0)
#define clear_lnxtsk_uses_fpu(lnxtsk) \
-do { (lnxtsk)->used_math = 0; } while(0)
+do { clear_stopped_child_used_math(lnxtsk); } while(0)
-#define lnxtsk_uses_fpu(lnxtsk) ((lnxtsk)->used_math)
-#define set_lnxtsk_using_fpu(lnxtsk) \
-do { task_thread_info(lnxtsk)->status |= TS_USEDFPU; } while(0)
-// do { (lnxtsk)->thread_info->status |= TS_USEDFPU; } while(0)
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
-#define set_lnxtsk_uses_fpu(lnxtsk) \
-do { set_stopped_child_used_math(lnxtsk); } while(0)
-#define clear_lnxtsk_uses_fpu(lnxtsk) \
-do { clear_stopped_child_used_math(lnxtsk); } while(0)
#define lnxtsk_uses_fpu(lnxtsk) (tsk_used_math(lnxtsk))
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
#undef init_fpu
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#define rtai_set_fpu_used(lnxtsk) __thread_set_has_fpu(lnxtsk)
-#else
-#define rtai_set_fpu_used(lnxtsk) do { task_thread_info(lnxtsk)->status |= TS_USEDFPU; } while(0)
-#endif
#define set_lnxtsk_using_fpu(lnxtsk) \
-do { rtai_set_fpu_used(lnxtsk); } while(0) //do { task_thread_info(lnxtsk)->status |= TS_USEDFPU; } while(0)
+do { rtai_set_fpu_used(lnxtsk); } while(0)
-// do { (lnxtsk)->thread_info->status |= TS_USEDFPU; } while(0)
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11) */
#endif /* !_RTAI_ASM_X86_64_FPU_H */

View file

@@ -36,13 +36,13 @@
static inline RTIME rt_get_tscnt(void)
{
#ifdef __i386__
unsigned long long t;
__asm__ __volatile__ ("rdtsc" : "=A" (t));
return t;
#else
union { unsigned int __ad[2]; RTIME t; } t;
__asm__ __volatile__ ("rdtsc" : "=a" (t.__ad[0]), "=d" (t.__ad[1]));
return t.t;
#endif
}
#else
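For orientation (not part of the diff): a minimal sketch of timing a code section with a raw rdtsc read of the kind rt_get_tscnt() performs above; cpu_hz is a hypothetical, separately calibrated CPU frequency, not an RTAI symbol.

static unsigned long long read_tsc(void)
{
#ifdef __i386__
	unsigned long long t;
	__asm__ __volatile__ ("rdtsc" : "=A" (t));   /* EDX:EAX pair read as one 64-bit value */
	return t;
#else
	unsigned int lo, hi;
	__asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;  /* combine the two 32-bit halves */
#endif
}

/* unsigned long long t0 = read_tsc(); ... unsigned long long t1 = read_tsc();
   elapsed_ns = (t1 - t0) * 1000000000ULL / cpu_hz; */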

View file

@@ -73,23 +73,23 @@ static __inline__ unsigned long ffnz (unsigned long word) {
static inline unsigned long __ffs(unsigned long word)
{
__asm__("bsfl %1,%0"
:"=r" (word)
:"rm" (word));
return word;
}
static inline unsigned __find_first_bit(const unsigned long *addr, unsigned size)
{
unsigned x = 0;
while (x < size) {
unsigned long val = *addr++;
if (val)
return __ffs(val) + x;
x += (sizeof(*addr)<<3);
}
return x;
}
static inline int find_next_bit(const unsigned long *addr, int size, int offset)
@@ -158,17 +158,17 @@ static inline unsigned long long rtai_ulldiv (unsigned long long ull,
/* do_div below taken from Linux-2.6.20 */
#ifndef do_div
#define do_div(n,base) ({ \
unsigned long __upper, __low, __high, __mod, __base; \
__base = (base); \
asm("":"=a" (__low), "=d" (__high):"A" (n)); \
__upper = __high; \
if (__high) { \
__upper = __high % (__base); \
__high = __high / (__base); \
} \
asm("divl %2":"=a" (__low), "=d" (__mod):"rm" (__base), "0" (__low), "1" (__upper)); \
asm("":"=A" (n):"a" (__low),"d" (__high)); \
__mod; \
})
#endif
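A reminder (not part of the diff) of the kernel do_div() convention the macro above follows: it divides the 64-bit lvalue in place and the expression evaluates to the remainder.

unsigned long long ns = 3000000123ULL;        /* arbitrary example value */
unsigned long rem = do_div(ns, 1000000000UL); /* ns becomes 3 (quotient), rem becomes 123 */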
@@ -207,20 +207,20 @@ static inline long long rtai_llimd(long long ll, int mult, int div) {
"mull %%esi\t\n" \
"movl %%eax,%%ebx\n\t" \
"movl %%ecx,%%eax\t\n" \
"movl %%edx,%%ecx\t\n" \
"mull %%esi\n\t" \
"addl %%ecx,%%eax\t\n" \
"adcl $0,%%edx\t\n" \
"divl %%edi\n\t" \
"movl %%eax,%%ecx\t\n" \
"movl %%ebx,%%eax\t\n" \
"divl %%edi\n\t" \
"sal $1,%%edx\t\n" \
"cmpl %%edx,%%edi\t\n" \
"movl %%ecx,%%edx\n\t" \
"jge 1f\t\n" \
"addl $1,%%eax\t\n" \
"adcl $0,%%edx\t\n" \
"1:\t\n" \
: "=A" (ll) \
: "A" (ll), "S" (mult), "D" (div) \
@@ -241,14 +241,14 @@ static inline unsigned long long rtai_u64div32c(unsigned long long a,
union { unsigned long long ull; unsigned long ul[2]; } u;
u.ull = a;
__asm__ __volatile(
"\n movl %%eax,%%ebx"
"\n movl %%edx,%%eax"
"\n xorl %%edx,%%edx"
"\n divl %%ecx"
"\n xchgl %%eax,%%ebx"
"\n divl %%ecx"
"\n movl %%edx,%%ecx"
"\n movl %%ebx,%%edx"
: "=a" (u.ul[0]), "=d" (u.ul[1])
: "a" (u.ul[0]), "d" (u.ul[1]), "c" (b)
: "%ebx" );
@@ -289,18 +289,18 @@ struct rtai_realtime_irq_s {
#define RTAI_SMP_NOTIFY_IPI RTAI_APIC_LOW_IPI
#define RTAI_TIMER_8254_IRQ 0
#define RTAI_FREQ_8254 1193180
#define RTAI_APIC_ICOUNT ((RTAI_FREQ_APIC + HZ/2)/HZ)
#define RTAI_COUNTER_2_LATCH 0xfffe
#define RTAI_LATENCY_8254 CONFIG_RTAI_SCHED_8254_LATENCY
#define RTAI_SETUP_TIME_8254 2011
#define RTAI_CALIBRATED_APIC_FREQ 0
#define RTAI_FREQ_APIC (rtai_tunables.apic_freq)
#define RTAI_LATENCY_APIC CONFIG_RTAI_SCHED_APIC_LATENCY
#define RTAI_SETUP_TIME_APIC 1000
#define RTAI_TIME_LIMIT 0x7000000000000000LL
#define RTAI_IFLAG 9
@@ -308,18 +308,18 @@ struct rtai_realtime_irq_s {
#define rtai_tskext(idx) hal_tskext[idx]
/* Use these to grant atomic protection when accessing the hardware */
#define rtai_hw_cli() hal_hw_cli()
#define rtai_hw_sti() hal_hw_sti()
#define rtai_hw_save_flags_and_cli(x) hal_hw_local_irq_save(x)
#define rtai_hw_restore_flags(x) hal_hw_local_irq_restore(x)
#define rtai_hw_save_flags(x) hal_hw_local_irq_flags(x)
/* Use these to grant atomic protection in hard real time code */
#define rtai_cli() hal_hw_cli()
#define rtai_sti() hal_hw_sti()
#define rtai_save_flags_and_cli(x) hal_hw_local_irq_save(x)
#define rtai_restore_flags(x) hal_hw_local_irq_restore(x)
#define rtai_save_flags(x) hal_hw_local_irq_flags(x)
#define RTAI_LT_KERNEL_VERSION_FOR_NONPERCPU KERNEL_VERSION(2,6,20)
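For orientation (not part of the diff): the save/restore wrappers above are normally used in the classic pattern of hard-disabling interrupts around a short critical section.

unsigned long flags;

rtai_save_flags_and_cli(flags);   /* hard-disable interrupts on this CPU, remember the old state */
/* ... touch shared state or hardware registers ... */
rtai_restore_flags(flags);        /* put the interrupt state back exactly as it was */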
@@ -421,7 +421,7 @@ typedef int (*rt_irq_handler_t)(unsigned irq, void *cookie);
#ifdef CONFIG_X86_TSC
#define RTAI_CALIBRATED_CPU_FREQ 0
#define RTAI_CPU_FREQ (rtai_tunables.cpu_freq)
#if 0
@@ -446,7 +446,7 @@ extern volatile long rtai_tsc_ofst[];
#else /* !CONFIG_X86_TSC */
#define RTAI_CPU_FREQ RTAI_FREQ_8254
#define RTAI_CALIBRATED_CPU_FREQ RTAI_FREQ_8254
#define rtai_rdtsc() rd_8254_ts()
@@ -848,15 +848,15 @@ static inline unsigned long save_and_set_taskpri(unsigned long taskpri)
static inline void rt_set_timer_delay (int delay)
{
if (delay) {
unsigned long flags;
rtai_hw_save_flags_and_cli(flags);
#ifdef CONFIG_X86_LOCAL_APIC
apic_write_around(APIC_TMICT, delay);
#else /* !CONFIG_X86_LOCAL_APIC */
outb(delay & 0xff,0x40);
outb(delay >> 8,0x40);
#endif /* CONFIG_X86_LOCAL_APIC */
rtai_hw_restore_flags(flags);
}
}

View file

@@ -131,14 +131,14 @@ static inline unsigned long long rtai_u64div32c(unsigned long long a,
union { unsigned long long ull; unsigned long ul[2]; } u;
u.ull = a;
__asm__ __volatile(
"\n movq %%rax,%%rbx"
"\n movq %%rdx,%%rax"
"\n xorq %%rdx,%%rdx"
"\n divq %%rcx"
"\n xchgq %%rax,%%rbx"
"\n divq %%rcx"
"\n movq %%rdx,%%rcx"
"\n movq %%rbx,%%rdx"
: "=a" (u.ul[0]), "=d" (u.ul[1])
: "a" (u.ul[0]), "d" (u.ul[1]), "c" (b)
: "%rbx" );
@@ -176,8 +176,8 @@ static inline unsigned long long rtai_u64div32c(unsigned long long a,
struct rtai_realtime_irq_s {
// int (*handler)(unsigned irq, void *cookie);
// void *cookie;
int retmode;
unsigned long cpumask;
// int (*irq_ack)(unsigned int, void *);
};
@@ -218,18 +218,18 @@ static inline int ext_irq_vector(int irq)
#define RTAI_SMP_NOTIFY_IPI RTAI_APIC_LOW_IPI
#define RTAI_TIMER_8254_IRQ 0
#define RTAI_FREQ_8254 1193180
#define RTAI_APIC_ICOUNT ((RTAI_FREQ_APIC + HZ/2)/HZ)
#define RTAI_COUNTER_2_LATCH 0xfffe
#define RTAI_LATENCY_8254 CONFIG_RTAI_SCHED_8254_LATENCY
#define RTAI_SETUP_TIME_8254 2011
#define RTAI_CALIBRATED_APIC_FREQ 0
#define RTAI_FREQ_APIC (rtai_tunables.apic_freq)
#define RTAI_LATENCY_APIC CONFIG_RTAI_SCHED_APIC_LATENCY
#define RTAI_SETUP_TIME_APIC 1000
#define RTAI_TIME_LIMIT 0x7000000000000000LL
#define RTAI_IFLAG 9
@@ -237,18 +237,18 @@ static inline int ext_irq_vector(int irq)
#define rtai_tskext(idx) hal_tskext[idx]
/* Use these to grant atomic protection when accessing the hardware */
#define rtai_hw_cli() hal_hw_cli()
#define rtai_hw_sti() hal_hw_sti()
#define rtai_hw_save_flags_and_cli(x) hal_hw_local_irq_save(x)
#define rtai_hw_restore_flags(x) hal_hw_local_irq_restore(x)
#define rtai_hw_save_flags(x) hal_hw_local_irq_flags(x)
/* Use these to grant atomic protection in hard real time code */
#define rtai_cli() hal_hw_cli()
#define rtai_sti() hal_hw_sti()
#define rtai_save_flags_and_cli(x) hal_hw_local_irq_save(x)
#define rtai_restore_flags(x) hal_hw_local_irq_restore(x)
#define rtai_save_flags(x) hal_hw_local_irq_flags(x)
/*
static inline struct hal_domain_struct *get_domain_pointer(int n)
@@ -283,10 +283,10 @@ do { \
#define hal_fast_flush_pipeline(cpuid) \
do { \
if (__ipipe_ipending_p(ipipe_this_cpu_root_context())) { \
rtai_cli(); \
__ipipe_sync_stage(); \
} \
} while (0)
#else
@@ -303,29 +303,29 @@ do { \
#define hal_fast_flush_pipeline(cpuid) \
do { \
if (__ipipe_ipending_p(ipipe_this_cpu_root_context())) { \
rtai_cli(); \
__ipipe_sync_stage(); \
} \
} while (0)
#else
#define hal_pend_domain_uncond(irq, domain, cpuid) \
do { \
if (likely(!test_bit(IPIPE_LOCK_FLAG, &(domain)->irqs[irq].control))) { \
__set_bit((irq) & IPIPE_IRQ_IMASK, &ipipe_cpudom_var(domain, irqpend_lomask)[(irq) >> IPIPE_IRQ_ISHIFT]); \
__set_bit((irq) >> IPIPE_IRQ_ISHIFT, &ipipe_cpudom_var(domain, irqpend_himask)); \
} else { \
__set_bit((irq) & IPIPE_IRQ_IMASK, &ipipe_cpudom_var(domain, irqheld_mask)[(irq) >> IPIPE_IRQ_ISHIFT]); \
} \
ipipe_cpudom_var(domain, irqall)[irq]++; \
} while (0)
#define hal_fast_flush_pipeline(cpuid) \
do { \
if (ipipe_cpudom_var(hal_root_domain, irqpend_himask) != 0) { \
rtai_cli(); \
hal_sync_stage(IPIPE_IRQMASK_ANY); \
} \
} while (0)
#endif
@@ -356,7 +356,7 @@ do { \
typedef int (*rt_irq_handler_t)(unsigned irq, void *cookie);
#define RTAI_CALIBRATED_CPU_FREQ 0
#define RTAI_CPU_FREQ (rtai_tunables.cpu_freq)
static inline unsigned long long rtai_rdtsc (void)
{
@@ -385,15 +385,15 @@ extern volatile unsigned long rtai_cpu_lock[];
//#define RTAI_TASKPRI 0xf0 // simplest usage without changing Linux code base
#if defined(CONFIG_X86_LOCAL_APIC) && defined(RTAI_TASKPRI)
#define SET_TASKPRI(cpuid) \
if (!rtai_linux_context[cpuid].set_taskpri) { \
apic_write_around(APIC_TASKPRI, ((apic_read(APIC_TASKPRI) & ~APIC_TPRI_MASK) | RTAI_TASKPRI)); \
rtai_linux_context[cpuid].set_taskpri = 1; \
}
#define CLR_TASKPRI(cpuid) \
if (rtai_linux_context[cpuid].set_taskpri) { \
apic_write_around(APIC_TASKPRI, (apic_read(APIC_TASKPRI) & ~APIC_TPRI_MASK)); \
rtai_linux_context[cpuid].set_taskpri = 0; \
}
#else
#define SET_TASKPRI(cpuid)
#define CLR_TASKPRI(cpuid)
@@ -696,25 +696,25 @@ extern struct hal_domain_struct rtai_domain;
#define _rt_switch_to_real_time(cpuid) \
do { \
rtai_linux_context[cpuid].lflags = xchg(ROOT_STATUS_ADR(cpuid), (1 << IPIPE_STALL_FLAG)); \
rtai_linux_context[cpuid].sflags = 1; \
__ipipe_set_current_domain(&rtai_domain); /*hal_current_domain(cpuid) = &rtai_domain;*/ \
} while (0)
#define rt_switch_to_linux(cpuid) \
do { \
if (rtai_linux_context[cpuid].sflags) { \
__ipipe_set_current_domain(hal_root_domain); /*hal_current_domain(cpuid) = hal_root_domain; */\
ROOT_STATUS_VAL(cpuid) = rtai_linux_context[cpuid].lflags; \
rtai_linux_context[cpuid].sflags = 0; \
} \
} while (0)
#define rt_switch_to_real_time(cpuid) \
do { \
if (!rtai_linux_context[cpuid].sflags) { \
_rt_switch_to_real_time(cpuid); \
} \
} while (0)
#define rtai_get_intr_handler(v) \
@@ -731,21 +731,21 @@ do { \
static inline int rt_save_switch_to_real_time(int cpuid)
{
if (!rtai_linux_context[cpuid].sflags) {
_rt_switch_to_real_time(cpuid);
return 0;
}
return 1;
}
#define rt_restore_switch_to_linux(sflags, cpuid) \
do { \
if (!sflags) { \
rt_switch_to_linux(cpuid); \
} else if (!rtai_linux_context[cpuid].sflags) { \
SET_TASKPRI(cpuid); \
_rt_switch_to_real_time(cpuid); \
} \
} while (0)
#define in_hrt_mode(cpuid) (rtai_linux_context[cpuid].sflags)
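For orientation (not part of the diff): a sketch of how the save/restore pair above is typically bracketed around work that must run in the RTAI domain; cpuid is assumed to be obtained elsewhere and the body in between is a placeholder.

int sflags = rt_save_switch_to_real_time(cpuid);  /* enter the RTAI domain if not already there */
/* ... work that must not be handled by the Linux domain ... */
rt_restore_switch_to_linux(sflags, cpuid);        /* restore whichever domain was active before */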
@@ -765,15 +765,15 @@ static inline unsigned long save_and_set_taskpri(unsigned long taskpri)
static inline void rt_set_timer_delay (int delay) {
if (delay) {
unsigned long flags;
rtai_hw_save_flags_and_cli(flags);
#ifdef CONFIG_X86_LOCAL_APIC
apic_write_around(APIC_TMICT, delay);
#else /* !CONFIG_X86_LOCAL_APIC */
outb(delay & 0xff,0x40);
outb(delay >> 8,0x40);
#endif /* CONFIG_X86_LOCAL_APIC */
rtai_hw_restore_flags(flags);
}
}
@@ -818,11 +818,11 @@ int rt_set_irq_ack(unsigned int irq, int (*irq_ack)(unsigned int, void *));
static inline int rt_request_irq_wack(unsigned irq, int (*handler)(unsigned irq, void *cookie), void *cookie, int retmode, int (*irq_ack)(unsigned int, void *))
{
int retval;
if ((retval = rt_request_irq(irq, handler, cookie, retmode)) < 0) {
return retval;
}
return rt_set_irq_ack(irq, irq_ack);
}
void rt_set_irq_cookie(unsigned irq, void *cookie);
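For orientation (not part of the diff): a sketch of installing a handler plus acknowledge hook through the inline shown above; MY_IRQ, my_handler() and my_ack() are hypothetical names.

static int my_handler(unsigned irq, void *cookie)
{
	/* hard real-time interrupt service body */
	return 0;
}

static int my_ack(unsigned int irq, void *arg)
{
	/* acknowledge the interrupt at the controller/device */
	return 0;
}

/* in module init: */
if (rt_request_irq_wack(MY_IRQ, my_handler, NULL, 0, my_ack) < 0)
	; /* registration failed, handle the error */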

View file

@@ -2,7 +2,7 @@
* rtai_leds.c - mini-driver for generic control of digital signals
*
* Copyright (C) 2000 Pierre Cloutier <pcloutier@PoseidonControls.com>
* 2001 David A. Schleef <ds@schleef.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public

View file

@@ -27,12 +27,12 @@
#if defined(__KERNEL__) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#define RT_REG_ORIG_AX orig_ax
#define RT_REG_SP sp
#define RT_REG_FLAGS flags
#define RT_REG_IP ip
#define RT_REG_CS cs
#define RT_REG_BP bp
#define RTAI_SYSCALL_CODE bx
#define RTAI_SYSCALL_ARGS cx
@@ -50,12 +50,12 @@
#else
#define RT_REG_ORIG_AX orig_eax
#define RT_REG_SP esp
#define RT_REG_FLAGS eflags
#define RT_REG_IP eip
#define RT_REG_CS xcs
#define RT_REG_BP ebp
#define RTAI_SYSCALL_CODE ebx
#define RTAI_SYSCALL_ARGS ecx
@@ -78,11 +78,11 @@
#ifdef CONFIG_X86_LOCAL_APIC
#define TIMER_NAME "APIC"
#define TIMER_TYPE 1
#define HRT_LINUX_TIMER_NAME "lapic"
#define FAST_TO_READ_TSC
#define TIMER_FREQ RTAI_FREQ_APIC
#define TIMER_LATENCY RTAI_LATENCY_APIC
#define TIMER_SETUP_TIME RTAI_SETUP_TIME_APIC
#define ONESHOT_SPAN (CPU_FREQ/(CONFIG_RTAI_CAL_FREQS_FACT + 2)) //(0x7FFFFFFFLL*(CPU_FREQ/TIMER_FREQ))
@@ -97,10 +97,10 @@
#else /* !CONFIG_X86_LOCAL_APIC */
#define USE_LINUX_TIMER
#define TIMER_NAME "8254-PIT"
#define TIMER_TYPE 0
#define HRT_LINUX_TIMER_NAME "pit"
#define TIMER_FREQ RTAI_FREQ_8254
#define TIMER_LATENCY RTAI_LATENCY_8254
#define TIMER_SETUP_TIME RTAI_SETUP_TIME_8254
#define ONESHOT_SPAN ((0x7FFF*(CPU_FREQ/TIMER_FREQ))/(CONFIG_RTAI_CAL_FREQS_FACT + 1)) //(0x7FFF*(CPU_FREQ/TIMER_FREQ))
@@ -120,7 +120,9 @@ extern "C" {
static inline void _lxrt_context_switch (struct task_struct *prev, struct task_struct *next, int cpuid)
{
extern void context_switch(void *, void *, void *);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19)
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3,14,0)
+prev->thread.fpu_counter = 0;
+#else
prev->fpu_counter = 0;
#endif
context_switch(0, prev, next);

View file

@@ -31,15 +31,15 @@
#if defined(__KERNEL__) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#define RT_REG_ORIG_AX orig_ax
#define RT_REG_SP sp
#define RT_REG_SS ss
#define RT_REG_FLAGS flags
#define RT_REG_IP ip
#define RT_REG_CS cs
#define RT_REG_BP bp
#define RT_REG_BX bx
#define RT_REG_CX cx
#define RTAI_SYSCALL_CODE di
#define RTAI_SYSCALL_ARGS si
@@ -57,15 +57,15 @@
#else
#define RT_REG_ORIG_AX orig_rax
#define RT_REG_SP rsp
#define RT_REG_SS ss
#define RT_REG_FLAGS eflags
#define RT_REG_IP rip
#define RT_REG_CS cs
#define RT_REG_BP rbp
#define RT_REG_BX rbx
#define RT_REG_CX rcx
#define RTAI_SYSCALL_CODE rdi
#define RTAI_SYSCALL_ARGS rsi
@@ -88,18 +88,18 @@
#ifdef CONFIG_X86_LOCAL_APIC
#define TIMER_NAME "APIC"
#define TIMER_TYPE 1
#define HRT_LINUX_TIMER_NAME "lapic"
#define FAST_TO_READ_TSC
#define TIMER_FREQ RTAI_FREQ_APIC
#define TIMER_LATENCY RTAI_LATENCY_APIC
#define TIMER_SETUP_TIME RTAI_SETUP_TIME_APIC
#define ONESHOT_SPAN (CPU_FREQ/(CONFIG_RTAI_CAL_FREQS_FACT + 2)) //(0x7FFFFFFFLL*(CPU_FREQ/TIMER_FREQ))
#ifdef CONFIG_GENERIC_CLOCKEVENTS
#define USE_LINUX_TIMER
#define update_linux_timer(cpuid) \
do { hal_pend_uncond(LOCAL_TIMER_IPI, cpuid); } while (0)
#else /* !CONFIG_GENERIC_CLOCKEVENTS */
#define update_linux_timer(cpuid)
#endif /* CONFIG_GENERIC_CLOCKEVENTS */
@@ -107,10 +107,10 @@
#else /* !CONFIG_X86_LOCAL_APIC */
#define USE_LINUX_TIMER
#define TIMER_NAME "8254-PIT"
#define TIMER_TYPE 0
#define HRT_LINUX_TIMER_NAME "pit"
#define TIMER_FREQ RTAI_FREQ_8254
#define TIMER_LATENCY RTAI_LATENCY_8254
#define TIMER_SETUP_TIME RTAI_SETUP_TIME_8254
#define ONESHOT_SPAN ((0x7FFF*(CPU_FREQ/TIMER_FREQ))/(CONFIG_RTAI_CAL_FREQS_FACT + 1)) //(0x7FFF*(CPU_FREQ/TIMER_FREQ))
@@ -132,7 +132,7 @@ static inline void _lxrt_context_switch (struct task_struct *prev, struct task_s
extern void *context_switch(void *, void *, void *);
#if 0
/* REMARK: the line below is not needed in i386, why should it be so if both
math_restore do a "clts" before orring TS_USEDFPU in status ????? */
if (task_thread_info(prev)->status & TS_USEDFPU) clts();
#endif
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,14,0)
@@ -144,10 +144,10 @@ static inline void _lxrt_context_switch (struct task_struct *prev, struct task_s
}
#define rt_copy_from_user(a, b, c) \
( { int ret = __copy_from_user_inatomic(a, b, c); ret; } )
#define rt_copy_to_user(a, b, c) \
( { int ret = __copy_to_user_inatomic(a, b, c); ret; } )
#define rt_put_user __put_user
#define rt_get_user __get_user
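For orientation (not part of the diff): like the underlying __copy_*_inatomic() helpers, these wrappers return the number of bytes that could not be copied, so 0 means success; msg and uptr are hypothetical.

struct my_msg msg;

if (rt_copy_from_user(&msg, uptr, sizeof(msg)) != 0) {
	/* some bytes were left uncopied: bail out or fall back to a safe path */
}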
@@ -155,7 +155,7 @@ static inline void _lxrt_context_switch (struct task_struct *prev, struct task_s
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
#define rt_strncpy_from_user(a, b, c) \
( { int ret = strncpy_from_user(a, b, c); ret; } )
#else

View file

@@ -34,23 +34,23 @@
#ifdef __KERNEL__
#define IFLAG RTAI_IFLAG
#define hard_cli() rtai_cli()
#define hard_sti() rtai_sti()
#define hard_save_flags_and_cli(x) rtai_save_flags_and_cli(x)
#define hard_restore_flags(x) rtai_restore_flags(x)
#define hard_save_flags(x) rtai_save_flags(x)
#define hard_cpu_id hal_processor_id
#endif /* __KERNEL__ */
#ifndef __RTAI_HAL__
#define tuned rtai_tunables
#define NR_RT_CPUS RTAI_NR_CPUS
#define RT_TIME_END RTAI_TIME_LIMIT
#define CPU_FREQ RTAI_CPU_FREQ
#define TIMER_8254_IRQ RTAI_TIMER_8254_IRQ
#define FREQ_8254 RTAI_FREQ_8254
#define LATENCY_8254 RTAI_LATENCY_8254
@@ -94,9 +94,9 @@ static inline int rt_free_cpu_own_irq (unsigned irq) {
}
static inline unsigned long get_cr2 (void) {
unsigned long address;
__asm__("movq %%cr2,%0":"=r" (address));
return address;
}
#endif /* __KERNEL__ */

View file

@@ -38,7 +38,7 @@
static inline long long rtai_srq(long srq, unsigned long args)
{
long long retval;
syscall(RTAI_SRQ_SYSCALL_NR, srq, args, &retval);
return retval;
}
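For orientation (not part of the diff): a minimal user-space sketch of invoking a service request through the helper above; MY_SRQ stands for an SRQ id previously set up on the kernel side and is hypothetical.

unsigned long req = 42;                                /* argument handed to the kernel-side SRQ handler */
long long ret = rtai_srq(MY_SRQ, (unsigned long)&req); /* traps via syscall(RTAI_SRQ_SYSCALL_NR, ...) */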

View file

@@ -23,13 +23,13 @@
#define _STARTUP_IRQ 1
#define _SHUTDOWN_IRQ 2
#define _ENABLE_IRQ 3
#define _DISABLE_IRQ 4
#define _MASK_AND_ACK_IRQ 5
#define _ACK_IRQ 6
#define _UNMASK_IRQ 7
#define _DISINT 8
#define _ENINT 9
#define _SAVE_FLAGS_CLI 10
#define _RESTORE_FLAGS 11

View file

@@ -46,11 +46,11 @@
#ifdef ipipe_apic_vector_irq /* LINUX_VERSION_CODE > KERNEL_VERSION(2,6,19) */
#define RTAI_APIC_HIGH_IPI ipipe_apic_vector_irq(RTAI_APIC_HIGH_VECTOR)
#define RTAI_APIC_LOW_IPI ipipe_apic_vector_irq(RTAI_APIC_LOW_VECTOR)
#define LOCAL_TIMER_IPI ipipe_apic_vector_irq(LOCAL_TIMER_VECTOR)
#else
#define RTAI_APIC_HIGH_IPI (RTAI_APIC_HIGH_VECTOR - FIRST_EXTERNAL_VECTOR)
#define RTAI_APIC_LOW_IPI (RTAI_APIC_LOW_VECTOR - FIRST_EXTERNAL_VECTOR)
#define LOCAL_TIMER_IPI (LOCAL_TIMER_VECTOR - FIRST_EXTERNAL_VECTOR)
#endif
#endif

View file

@@ -1,192 +0,0 @@
/*
* Copyright (C) 1999-2003 Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _RTAI_BITS_H
#define _RTAI_BITS_H
#include <rtai_types.h>
#define RT_BITS_MAGIC 0x9ac24448 // nam2num("rtbits")
#define ALL_SET 0
#define ANY_SET 1
#define ALL_CLR 2
#define ANY_CLR 3
#define ALL_SET_AND_ANY_SET 4
#define ALL_SET_AND_ALL_CLR 5
#define ALL_SET_AND_ANY_CLR 6
#define ANY_SET_AND_ALL_CLR 7
#define ANY_SET_AND_ANY_CLR 8
#define ALL_CLR_AND_ANY_CLR 9
#define ALL_SET_OR_ANY_SET 10
#define ALL_SET_OR_ALL_CLR 11
#define ALL_SET_OR_ANY_CLR 12
#define ANY_SET_OR_ALL_CLR 13
#define ANY_SET_OR_ANY_CLR 14
#define ALL_CLR_OR_ANY_CLR 15
#define SET_BITS 0
#define CLR_BITS 1
#define SET_CLR_BITS 2
#define NOP_BITS 3
#define BITS_ERR (RTE_OBJINV) // same as semaphores
#define BITS_TIMOUT (RTE_TIMOUT) // same as semaphores
struct rt_bits_struct;
#ifdef __KERNEL__
#ifndef __cplusplus
typedef struct rt_bits_struct {
struct rt_queue queue; /* <= Must be first in struct. */
int magic;
int type; // to align mask to semaphore count, for easier uspace init
unsigned long mask;
} BITS;
#else /* __cplusplus */
extern "C" {
#endif /* !__cplusplus */
int __rtai_bits_init(void);
void __rtai_bits_exit(void);
void rt_bits_init(struct rt_bits_struct *bits, unsigned long mask);
int rt_bits_delete(struct rt_bits_struct *bits);
RTAI_SYSCALL_MODE unsigned long rt_get_bits(struct rt_bits_struct *bits);
RTAI_SYSCALL_MODE unsigned long rt_bits_reset(struct rt_bits_struct *bits, unsigned long mask);
RTAI_SYSCALL_MODE unsigned long rt_bits_signal(struct rt_bits_struct *bits, int setfun, unsigned long masks);
RTAI_SYSCALL_MODE int _rt_bits_wait(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask, int space);
static inline int rt_bits_wait(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask)
{
return _rt_bits_wait(bits, testfun, testmasks, exitfun, exitmasks, resulting_mask, 1);
}
RTAI_SYSCALL_MODE int _rt_bits_wait_if(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask, int space);
static inline int rt_bits_wait_if(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask)
{
return _rt_bits_wait_if(bits, testfun, testmasks, exitfun, exitmasks, resulting_mask, 1);
}
RTAI_SYSCALL_MODE int _rt_bits_wait_until(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, RTIME time, unsigned long *resulting_mask, int space);
static inline int rt_bits_wait_until(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, RTIME time, unsigned long *resulting_mask)
{
return _rt_bits_wait_until(bits, testfun, testmasks, exitfun, exitmasks, time, resulting_mask, 1);
}
RTAI_SYSCALL_MODE int _rt_bits_wait_timed(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, RTIME delay, unsigned long *resulting_mask, int space);
static inline int rt_bits_wait_timed(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, RTIME delay, unsigned long *resulting_mask)
{
return _rt_bits_wait_timed(bits, testfun, testmasks, exitfun, exitmasks, delay, resulting_mask, 1);
}
#ifdef __cplusplus
}
#endif /* __cplusplus */
#else /* !__KERNEL__ */
#include <rtai_lxrt.h>
#define BITSIDX 0
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
RTAI_PROTO(struct rt_bits_struct *,rt_bits_init,(unsigned long name, unsigned long mask))
{
struct { unsigned long name, mask; } arg = { name, mask };
return (struct rt_bits_struct *)rtai_lxrt(BITSIDX, SIZARG, BITS_INIT, &arg).v[LOW];
}
RTAI_PROTO(int, rt_bits_delete,(struct rt_bits_struct *bits))
{
struct { struct rt_bits_struct *bits; } arg = { bits };
return rtai_lxrt(BITSIDX, SIZARG, BITS_DELETE, &arg).i[LOW];
}
RTAI_PROTO(unsigned long, rt_get_bits,(struct rt_bits_struct *bits))
{
struct { struct rt_bits_struct *bits; } arg = { bits };
return rtai_lxrt(BITSIDX, SIZARG, BITS_GET, &arg).i[LOW];
}
RTAI_PROTO(unsigned long, rt_bits_reset,(struct rt_bits_struct *bits, unsigned long mask))
{
struct { struct rt_bits_struct *bits; unsigned long mask; } arg = { bits, mask };
return (unsigned long)rtai_lxrt(BITSIDX, SIZARG, BITS_RESET, &arg).i[LOW];
}
RTAI_PROTO(unsigned long, rt_bits_signal,(struct rt_bits_struct *bits, int setfun, unsigned long masks))
{
struct { struct rt_bits_struct *bits; long setfun; unsigned long masks; } arg = { bits, setfun, masks };
return rtai_lxrt(BITSIDX, SIZARG, BITS_SIGNAL, &arg).i[LOW];
}
RTAI_PROTO(int, rt_bits_wait,(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask))
{
struct { struct rt_bits_struct *bits; long testfun; unsigned long testmasks; long exitfun; unsigned long exitmasks; unsigned long *resulting_mask; long space; } arg = { bits, testfun, testmasks, exitfun, exitmasks, resulting_mask, 0 };
return rtai_lxrt(BITSIDX, SIZARG, BITS_WAIT, &arg).i[LOW];
}
RTAI_PROTO(int, rt_bits_wait_if,(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask))
{
struct { struct rt_bits_struct *bits; long testfun; unsigned long testmasks; long exitfun; unsigned long exitmasks; unsigned long *resulting_mask; long space; } arg = { bits, testfun, testmasks, exitfun, exitmasks, resulting_mask, 0 };
return rtai_lxrt(BITSIDX, SIZARG, BITS_WAIT_IF, &arg).i[LOW];
}
RTAI_PROTO(int, rt_bits_wait_until,(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, RTIME time, unsigned long *resulting_mask))
{
struct { struct rt_bits_struct *bits; long testfun; unsigned long testmasks; long exitfun; unsigned long exitmasks; RTIME time; unsigned long *resulting_mask; long space; } arg = { bits, testfun, testmasks, exitfun, exitmasks, time, resulting_mask, 0 };
return rtai_lxrt(BITSIDX, SIZARG, BITS_WAIT_UNTIL, &arg).i[LOW];
}
RTAI_PROTO(int, rt_bits_wait_timed,(struct rt_bits_struct *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, RTIME delay, unsigned long *resulting_mask))
{
struct { struct rt_bits_struct *bits; long testfun; unsigned long testmasks; long exitfun; unsigned long exitmasks; RTIME delay; unsigned long *resulting_mask; long space; } arg = { bits, testfun, testmasks, exitfun, exitmasks, delay, resulting_mask, 0 };
return rtai_lxrt(BITSIDX, SIZARG, BITS_WAIT_TIMED, &arg).i[LOW];
}
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __KERNEL__ */
#if !defined(__KERNEL__) || defined(__cplusplus)
typedef struct rt_bits_struct {
int opaque;
} BITS;
#endif /* !__KERNEL__ || __cplusplus */
#endif /* !_RTAI_BITS_H */

View file

@@ -48,8 +48,8 @@
#define SET_ASYNC_SIG 14
#define EAVESDROP 19
#define OVRWRITE 20
#define READ_IF 21
#define WRITE_IF 22
#define RTF_NAMED_CREATE 23
#define RTF_GET_N_FIFOS 15
@@ -75,24 +75,24 @@ struct rt_fifo_get_info_struct{
#define FUN_FIFOS_LXRT_INDX 10
#define _CREATE 0
#define _DESTROY 1
#define _PUT 2
#define _GET 3
#define _RESET 4
#define _RESIZE 5
#define _SEM_INIT 6
#define _SEM_DESTRY 7
#define _SEM_POST 8
#define _SEM_TRY 9
#define _CREATE_NAMED 10
#define _GETBY_NAME 11
#define _OVERWRITE 12
#define _PUT_IF 13
#define _GET_IF 14
#define _NAMED_CREATE 15
#define _AVBS 16
#define _FRBS 17
#ifdef __KERNEL__
@@ -148,7 +148,7 @@ RTAI_SYSCALL_MODE int rtf_create(unsigned int fifo, int size);
* An RT-FIFO is created with a name of name, it will be allocated
* the first unused minor number and will have a user assigned size.
* Return value: On success, the allocated minor number is returned.
* On error, -errno is returned.
*/
int rtf_named_create(const char *name, int size);
@@ -158,7 +158,7 @@ int rtf_named_create(const char *name, int size);
* An RT-FIFO is created with a name of name, it will be allocated
* the first unused minor number and will have a default size.
* Return value: On success, the allocated minor number is returned.
* On error, -errno is returned.
*/
RTAI_SYSCALL_MODE int rtf_create_named(const char *name);
@@ -167,7 +167,7 @@ RTAI_SYSCALL_MODE int rtf_create_named(const char *name);
*
* Find the RT-FIFO with the name name.
* Return value: On success, the minor number is returned.
* On error, -errno is returned.
*/
RTAI_SYSCALL_MODE int rtf_getfifobyname(const char *name);
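For orientation (not part of the diff): a kernel-side sketch of the two named-FIFO calls documented above; the name and size are arbitrary examples.

int minor = rtf_named_create("myfifo", 4096);   /* >= 0: allocated minor number, < 0: -errno */

if (minor >= 0 && rtf_getfifobyname("myfifo") == minor) {
	/* user space can now reach the fifo through the corresponding /dev/rtf node */
}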

View file

@@ -29,9 +29,9 @@
#define HAL_VERSION_STRING IPIPE_VERSION_STRING
#define HAL_NR_CPUS IPIPE_NR_CPUS
#define HAL_NR_FAULTS IPIPE_NR_FAULTS
#define HAL_NR_EVENTS IPIPE_NR_EVENTS
#define HAL_ROOT_NPTDKEYS IPIPE_ROOT_NPTDKEYS
#define HAL_APIC_HIGH_VECTOR IPIPE_HRTIMER_VECTOR //IPIPE_SERVICE_VECTOR3
@@ -44,7 +44,7 @@
#define HAL_EXIT_PROCESS IPIPE_EVENT_EXIT
#define HAL_KICK_PROCESS IPIPE_EVENT_SIGWAKE
#define hal_pipeline __ipipe_pipeline
#define hal_domain_struct ipipe_domain
#define hal_root_domain ipipe_root_domain
@@ -70,7 +70,7 @@
#define hal_lock_irq __ipipe_lock_irq
#define hal_unlock_irq __ipipe_unlock_irq
#define hal_std_irq_dtype __ipipe_std_irq_dtype
#define hal_ipipe_std_irq_dtype __adeos_std_irq_dtype
#define hal_tick_regs __ipipe_tick_regs
@@ -94,7 +94,7 @@
#define hal_suspend_domain() break
#define hal_alloc_irq ipipe_alloc_virq
#define hal_free_irq ipipe_free_virq
#if !defined(CONFIG_PPC) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,4,32) || (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)))
#define hal_virtualize_irq ipipe_virtualize_irq
@@ -112,13 +112,13 @@ do { \
} while (0)
#endif
#define hal_sysinfo_struct ipipe_sysinfo
#define hal_attr_struct ipipe_domain_attr
#define hal_init_attr ipipe_init_attr
#define hal_register_domain ipipe_register_domain
#define hal_unregister_domain ipipe_unregister_domain
#define hal_catch_event ipipe_catch_event
#define hal_event_handler ipipe_event_handler
#define hal_event_handler_fun(e) legacy.handlers[e] //evhand[e]
#define hal_set_printk_sync ipipe_set_printk_sync
@@ -136,8 +136,8 @@
#define hal_processor_id ipipe_processor_id
#define hal_hw_cli local_irq_disable_hw
#define hal_hw_sti local_irq_enable_hw
#define hal_hw_local_irq_save local_irq_save_hw
#define hal_hw_local_irq_restore local_irq_restore_hw
#define hal_hw_local_irq_flags local_save_flags_hw

View file

@@ -108,8 +108,8 @@
#define WAKEUP_SLEEPING 34
#define CHANGE_TASK_PRIO 35
#define SET_RESUME_TIME 36
#define SET_PERIOD 37
#define HARD_TIMER_RUNNING 38
// semaphores
#define TYPED_SEM_INIT 39
@@ -165,7 +165,7 @@
#define MBX_RECEIVE_UNTIL 87
#define MBX_RECEIVE_TIMED 88
#define MBX_EVDRP 89
#define MBX_OVRWR_SEND 90
// short intertask messages
#define SENDMSG 91
@@ -201,63 +201,41 @@
#define EVDRPX 119
// proxies
#define PROXY_ATTACH 120
#define PROXY_DETACH 121
#define PROXY_TRIGGER 122
// synchronous user space specific intertask messages and related proxies
#define RT_SEND 123
#define RT_RECEIVE 124
#define RT_CRECEIVE 125
#define RT_REPLY 126
#define RT_PROXY_ATTACH 127
#define RT_PROXY_DETACH 128
#define RT_TRIGGER 129
#define RT_NAME_ATTACH 130
#define RT_NAME_DETACH 131
#define RT_NAME_LOCATE 132
// bits
#define BITS_INIT 133
#define BITS_DELETE 134
#define NAMED_BITS_INIT 135
#define NAMED_BITS_DELETE 136
#define BITS_GET 137
#define BITS_RESET 138
#define BITS_SIGNAL 139
#define BITS_WAIT 140
#define BITS_WAIT_IF 141
#define BITS_WAIT_UNTIL 142
#define BITS_WAIT_TIMED 143
// typed mail boxes
#define TBX_INIT 144
#define TBX_DELETE 145
#define NAMED_TBX_INIT 146
#define NAMED_TBX_DELETE 147
#define TBX_SEND 148
#define TBX_SEND_IF 149
#define TBX_SEND_UNTIL 150
#define TBX_SEND_TIMED 151
#define TBX_RECEIVE 152
#define TBX_RECEIVE_IF 153
#define TBX_RECEIVE_UNTIL 154
#define TBX_RECEIVE_TIMED 155
#define TBX_BROADCAST 156
#define TBX_BROADCAST_IF 157
#define TBX_BROADCAST_UNTIL 158
#define TBX_BROADCAST_TIMED 159
#define TBX_URGENT 160
#define TBX_URGENT_IF 161
#define TBX_URGENT_UNTIL 162
#define TBX_URGENT_TIMED 163
// pqueue
#define MQ_OPEN 164
#define MQ_RECEIVE 165
#define MQ_SEND 166
#define MQ_CLOSE 167
#define MQ_GETATTR 168
#define MQ_SETATTR 169
#define MQ_NOTIFY 170
@@ -271,8 +249,8 @@
#define NAMED_TASK_DELETE 176
// registry
#define GET_ADR 177
#define GET_NAME 178
// netrpc
#define NETRPC 179
@@ -287,18 +265,18 @@
#define COND_SIGNAL 186
// new shm
#define SHM_ALLOC 187
#define SHM_FREE 188
#define SHM_SIZE 189
#define HEAP_SET 190
#define HEAP_ALLOC 191
#define HEAP_FREE 192
#define HEAP_NAMED_ALLOC 193
#define HEAP_NAMED_FREE 194
#define MALLOC 195
#define FREE 196
#define NAMED_MALLOC 197
#define NAMED_FREE 198
#define SUSPEND_IF 199
#define SUSPEND_UNTIL 200
@@ -314,11 +292,11 @@
#define SCHED_UNLOCK 210
#define PEND_LINUX_IRQ 211
#define SET_LINUX_SYSCALL_MODE 212
/*#define RETURN_LINUX_SYSCALL 213 available */
#define REQUEST_RTC 214
#define RELEASE_RTC 215
#define RT_GETTID 216
#define SET_NETRPC_TIMEOUT 217
#define GET_REAL_TIME 218
#define GET_REAL_TIME_NS 219
@@ -343,9 +321,9 @@
// Qblk's
#define RT_INITTICKQUEUE 69
#define RT_RELEASETICKQUEUE 70
#define RT_QDYNALLOC 71
#define RT_QDYNFREE 72
#define RT_QDYNINIT 73
#define RT_QBLKWAIT 74
#define RT_QBLKREPEAT 75
#define RT_QBLKSOON 76
@@ -372,11 +350,11 @@
#define RTAI_MALLOC 96
#define RT_FREE 97
#define RT_MMGR_STATS 98
#define RT_STOMP 99
// VC
#define RT_VC_ATTACH 100
#define RT_VC_RELEASE 101
#define RT_VC_RESERVE 102
// Linux Signal Support
#define RT_GET_LINUX_SIGNAL 103
#define RT_GET_ERRNO 104
@@ -402,7 +380,7 @@
#define SET_USP_FLAGS 1016
#define GET_USP_FLG_MSK 1017
#define SET_USP_FLG_MSK 1018
#define IS_HARD 1019
#define LINUX_SERVER 1020
#define ALLOC_REGISTER 1021
#define DELETE_DEREGISTER 1022
@@ -429,8 +407,8 @@
#define INDX(x) (((x) >> 24) & 0xF)
#define LINUX_SYSCALL_GET_MODE 0
#define SYNC_LINUX_SYSCALL 1
#define ASYNC_LINUX_SYSCALL 2
#define LINUX_SYSCALL_CANCELED 3
#define LINUX_SYSCALL_GET_CALLBACK ((void *)4)
@@ -446,7 +424,7 @@ struct linux_syscalls_list { int in, out, nr, id, mode; void (*cbfun)(long, long
/*
Encoding of system call argument
31 0
soft SRQ .... |||| |||| |||| .... .... .... .... 0 - 4095 max
int NARG .... .... .... .... |||| |||| |||| ||||
arg INDX |||| .... .... .... .... .... .... ....
@@ -534,7 +512,7 @@ static inline struct rt_task_struct *pid2rttask(pid_t pid)
static inline long rttask2pid(struct rt_task_struct *task)
{
return task->tid;
}
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
@@ -647,18 +625,18 @@ static void hard_soft_toggler(int sig)
RTAI_PROTO(RT_TASK *, rt_task_init_schmod, (unsigned long name, int priority, int stack_size, int max_msg_size, int policy, int cpus_allowed))
{
struct sched_param mysched;
struct { unsigned long name; long priority, stack_size, max_msg_size, cpus_allowed; } arg = { name ? name : rt_get_name(NULL), priority, stack_size, max_msg_size, cpus_allowed };
SET_SIGNAL_TOGGLER();
if (policy == SCHED_OTHER) {
mysched.sched_priority = 0;
} else if ((mysched.sched_priority = sched_get_priority_max(policy) - priority) < 1) {
mysched.sched_priority = 1;
}
if (sched_setscheduler(0, policy, &mysched) < 0) {
return 0;
}
rtai_iopl();
mlockall(MCL_CURRENT | MCL_FUTURE);
@@ -674,7 +652,7 @@ RTAI_PROTO(long, rt_thread_create, (void *fun, void *args, int stack_size))
long thread;
pthread_attr_t attr;
pthread_attr_init(&attr);
if (!pthread_attr_setstacksize(&attr, stack_size > RT_THREAD_STACK_MIN ? stack_size : RT_THREAD_STACK_MIN)) {
struct { unsigned long hs; } arg = { 0 };
if ((arg.hs = rtai_lxrt(BIDX, SIZARG, IS_HARD, &arg).i[LOW])) {
@@ -715,7 +693,7 @@ static void linux_syscall_server_fun(struct linux_syscalls_list *list)
struct linux_syscall calldata[syscalls.nr];
syscalls.syscall = calldata;
memset(calldata, 0, sizeof(calldata));
mlockall(MCL_CURRENT | MCL_FUTURE);
list->serv = &syscalls;
rtai_lxrt(BIDX, sizeof(RT_TASK *), RESUME, &syscalls.task);
while (abs(rtai_lxrt(BIDX, sizeof(RT_TASK *), SUSPEND, &syscalls.serv).i[LOW]) < RTE_LOWERR) {
@@ -734,7 +712,7 @@ static void linux_syscall_server_fun(struct linux_syscalls_list *list)
syscalls.out = 0;
}
}
}
rtai_lxrt(BIDX, sizeof(RT_TASK *), LXRT_TASK_DELETE, &syscalls.serv);
}
@@ -1161,16 +1139,16 @@ RTAI_PROTO(int, rt_task_signal_handler, (RT_TASK *task, void (*handler)(void)))
RTAI_PROTO(int,rt_task_use_fpu,(RT_TASK *task, int use_fpu_flag))
{
struct { RT_TASK *task; long use_fpu_flag; } arg = { task, use_fpu_flag };
if (rtai_lxrt(BIDX, SIZARG, RT_BUDDY, &arg).v[LOW] != task) {
return rtai_lxrt(BIDX, SIZARG, TASK_USE_FPU, &arg).i[LOW];
} else {
// note that it would be enough to do whatever FP op here to have it OK. But
// that is scary if it is done when already in hard real time, and we do not
// want to force users to call this before making it hard.
rtai_lxrt(BIDX, SIZARG, HRT_USE_FPU, &arg);
return 0;
}
}
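The comment inside rt_task_use_fpu() above is the whole point: asking for FPU support is cheap while the task is still soft, and risky once it is already hard real time. A hedged user-space sketch of the intended ordering; the task name, priority and sizes are placeholders.

#include <rtai_lxrt.h>

static RT_TASK *start_fpu_task(void)
{
        RT_TASK *task = rt_task_init(nam2num("FPUTSK"), 0, 0, 0);
        if (!task) {
                return NULL;
        }
        rt_task_use_fpu(task, 1);       /* enable FP support while still in soft mode */
        rt_make_hard_real_time();       /* only then switch to hard real time */
        return task;
}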
RTAI_PROTO(int,rt_buddy_task_use_fpu,(RT_TASK *task, int use_fpu_flag))
@@ -1339,15 +1317,15 @@ RTAI_PROTO(int,rt_printk,(const char *format, ...))
RTAI_PROTO(int,rtai_print_to_screen,(const char *format, ...))
{
char display[VSNPRINTF_BUF_SIZE];
struct { const char *display; long nch; } arg = { display, 0 };
va_list args;
va_start(args, format);
arg.nch = vsnprintf(display, VSNPRINTF_BUF_SIZE, format, args);
va_end(args);
rtai_lxrt(BIDX, SIZARG, PRINTK, &arg);
return arg.nch;
}
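Both print wrappers format into a VSNPRINTF_BUF_SIZE buffer and hand it to the PRINTK service, so they can be called from real-time user-space code; the return value is the character count reported by vsnprintf(). An illustrative use, with made-up variables:

#include <rtai_lxrt.h>

static void report_cycle(int cycle, long long elapsed_ns)
{
        int n = rtai_print_to_screen("cycle %d took %lld ns\n", cycle, elapsed_ns);
        rt_printk("rtai_print_to_screen wrote %d characters\n", n);
}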
RTAI_PROTO(int,rt_usp_signal_handler,(void (*handler)(void)))

View file

@@ -96,12 +96,12 @@ typedef struct rtheap {
int flags;
u_long extentsize,
pagesize,
pageshift,
hdrsize,
npages, /* Number of pages per extent */
ubytes,
maxcont;
struct list_head extents;

View file

@@ -25,7 +25,7 @@
#ifndef _RTAI_MATH_H
#define _RTAI_MATH_H 1
#define _MATH_H 1
#include <rtai_types.h>
#ifdef __attribute_pure__
@@ -34,121 +34,69 @@
#ifdef __attribute_used__
#undef __attribute_used__
#endif
#include <features.h>
__BEGIN_DECLS
#ifdef __cplusplus
extern "C" {
/* Get machine-dependent HUGE_VAL value (returned on overflow).
On all IEEE754 machines, this is +Infinity. */
#include <bits/huge_val.h>
/* Get machine-dependent NAN value (returned for some domain errors). */
#ifdef __USE_ISOC99
# include <bits/nan.h>
#endif #endif
/* Get general and ISO C99 specific information. */
#include <bits/mathdef.h>
extern double acos(double);
extern double asin(double);
extern double atan(double);
extern double atan2(double, double);
extern double ceil(double);
extern double copysign(double, double);
extern double cos(double);
extern double cosh(double);
extern double exp(double);
extern double expm1(double);
extern double fabs(double);
extern int finite(double);
extern double floor(double);
extern double fmod(double, double);
extern double frexp(double, int *);
extern double log(double);
extern double log10(double);
extern double modf(double, double *);
extern double pow(double, double);
extern double scalbn(double, long);
extern double sin(double);
extern double sinh(double);
extern double sqrt(double);
extern double tan(double);
extern double tanh(double);
extern double acosh(double);
extern double asinh(double);
extern double atanh(double);
extern double cabs(double __complex__ x);
extern double cbrt(double);
extern double drem(double, double);
extern double erf(double);
extern double erfc(double);
extern double gamma(double);
extern double gamma_r(double, int *);
extern double hypot(double, double);
extern int ilogb(double);
extern double j0(double);
extern double j1(double);
extern double jn(int, double);
extern double ldexp(double, int);
extern double lgamma(double);
extern double lgamma_r(double, int *);
extern double log1p(double);
extern double nextafter(double, double);
extern double remainder(double, double);
extern double rint(double);
extern double scalb(double, double);
extern double significand(double);
extern double y0(double);
extern double y1(double);
extern double yn(int, double);
extern int libm_errno;
/* The file <bits/mathcalls.h> contains the prototypes for all the
actual math functions. These macros are used for those prototypes,
so we can easily declare each function as both `name' and `__name',
and can declare the float versions `namef' and `__namef'. */
#define __MATHCALL(function,suffix, args) \
__MATHDECL (_Mdouble_,function,suffix, args)
#define __MATHDECL(type, function,suffix, args) \
__MATHDECL_1(type, function,suffix, args); \
__MATHDECL_1(type, __CONCAT(__,function),suffix, args)
#define __MATHCALLX(function,suffix, args, attrib) \
__MATHDECLX (_Mdouble_,function,suffix, args, attrib)
#define __MATHDECLX(type, function,suffix, args, attrib) \
__MATHDECL_1(type, function,suffix, args) __attribute__ (attrib); \
__MATHDECL_1(type, __CONCAT(__,function),suffix, args) __attribute__ (attrib)
#define __MATHDECL_1(type, function,suffix, args) \
extern type __MATH_PRECNAME(function,suffix) args __THROW
#define _Mdouble_ double
#define __MATH_PRECNAME(name,r) __CONCAT(name,r)
// added for gcc-3.2
#define _Mdouble_BEGIN_NAMESPACE __BEGIN_NAMESPACE_STD
#define _Mdouble_END_NAMESPACE __END_NAMESPACE_STD
// end added for gcc-3.2
#include <bits/mathcalls.h>
#undef _Mdouble_
// added for gcc-3.2
#undef _Mdouble_BEGIN_NAMESPACE
#undef _Mdouble_END_NAMESPACE
// end added for gcc-3.2
#undef __MATH_PRECNAME
#if defined __USE_MISC || defined __USE_ISOC99
/* Include the file of declarations again, this time using `float'
instead of `double' and appending f to each function name. */
# ifndef _Mfloat_
# define _Mfloat_ float
# endif
# define _Mdouble_ _Mfloat_
# ifdef __STDC__
# define __MATH_PRECNAME(name,r) name##f##r
# else
# define __MATH_PRECNAME(name,r) name/**/f/**/r
# endif
// added for gcc-3.2
#define _Mdouble_BEGIN_NAMESPACE __BEGIN_NAMESPACE_C99
#define _Mdouble_END_NAMESPACE __END_NAMESPACE_C99
// end added for gcc-3.2
# include <bits/mathcalls.h>
# undef _Mdouble_
// added for gcc-3.2
# undef _Mdouble_BEGIN_NAMESPACE
# undef _Mdouble_END_NAMESPACE
// end added for gcc-3.2
# undef __MATH_PRECNAME
# if (__STDC__ - 0 || __GNUC__ - 0) && !defined __NO_LONG_DOUBLE_MATH
/* Include the file of declarations again, this time using `long double'
instead of `double' and appending l to each function name. */
# ifndef _Mlong_double_
# define _Mlong_double_ long double
# endif
# define _Mdouble_ _Mlong_double_
# ifdef __STDC__
# define __MATH_PRECNAME(name,r) name##l##r
# else
# define __MATH_PRECNAME(name,r) name/**/l/**/r
# endif
// added for gcc-3.2
#define _Mdouble_BEGIN_NAMESPACE __BEGIN_NAMESPACE_C99
#define _Mdouble_END_NAMESPACE __END_NAMESPACE_C99
// end added for gcc-3.2
# include <bits/mathcalls.h>
# undef _Mdouble_
// added for gcc-3.2
# undef _Mdouble_BEGIN_NAMESPACE
# undef _Mdouble_END_NAMESPACE
// end added for gcc-3.2
# undef __MATH_PRECNAME
# endif /* __STDC__ || __GNUC__ */
#endif /* Use misc or ISO C99. */
#undef __MATHDECL_1
#undef __MATHDECL
#undef __MATHCALL
#if defined __USE_MISC || defined __USE_XOPEN
/* This variable is used by `gamma' and `lgamma'. */
extern int signgam;
#endif
/* ISO C99 defines some generic macros which work on any data type. */
#if defined(__USE_ISOC99) && __USE_ISOC99
/* Get the architecture specific values describing the floating-point
@@ -239,16 +187,16 @@ enum
# define isnormal(x) (fpclassify (x) == FP_NORMAL)
/* Return nonzero value if X is a NaN. We could use `fpclassify' but
we already have this functions `__isnan' and it is faster. */
we already have this functions `__builtin_isnan' and it is faster. */
# ifdef __NO_LONG_DOUBLE_MATH
# define isnan(x) \
(sizeof (x) == sizeof (float) ? __isnanf (x) : __isnan (x))
(sizeof (x) == sizeof (float) ? __builtin_isnanf (x) : __builtin_isnan (x))
# else
# define isnan(x) \
(sizeof (x) == sizeof (float) \
? __isnanf (x) \
? __builtin_isnanf (x) \
: sizeof (x) == sizeof (double) \
? __isnan (x) : __isnanl (x))
? __builtin_isnan (x) : __builtin_isnanl (x))
# endif
/* Return nonzero value is X is positive or negative infinity. */
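The hunk above swaps the copied glibc __isnan/__isnanf/__isnanl helpers for the compiler builtins, so no libm object code is required for the classification macros. A small stand-alone sketch of what the builtin-based macros evaluate to (host-side, for illustration only):

#include <stdio.h>

int main(void)
{
        volatile double zero = 0.0;     /* volatile keeps the compiler from folding the divisions */
        double nan = zero / zero;
        double inf = 1.0 / zero;

        printf("isnan(nan) = %d\n", __builtin_isnan(nan));      /* 1 */
        printf("isnan(1.0) = %d\n", __builtin_isnan(1.0));      /* 0 */
        printf("isinf(inf) = %d\n", __builtin_isinf(inf));      /* nonzero */
        return 0;
}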
@@ -442,21 +390,16 @@ extern int matherr (struct exception *__exc);
#endif
__END_DECLS
/* Missing declarations */
struct complex {
double x;
double y;
};
double cabs __P((struct complex));
double gamma_r(double x, int *signgamp); /* wrapper lgamma_r */
long int rinttol(double x);
long int roundtol(double x);
#ifdef __cplusplus
}
#endif
#endif /* !_RTAI_MATH_H */
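After this change the header declares plain double-precision entry points instead of pulling in glibc internals, the idea being that the rtai_math kernel module resolves them at load time. A heavily hedged kernel-module sketch; symbol availability and proper FPU context handling for the calling real-time code are assumptions that real code must verify.

#include <linux/module.h>
#include <linux/kernel.h>
#include <rtai_math.h>

static int __init mathdemo_init(void)
{
        /* printk has no %f, so scale to an integer for display */
        double r = sqrt(2.0) + sin(0.5);
        printk(KERN_INFO "mathdemo: result x1000 = %d\n", (int)(r * 1000.0));
        return 0;
}

static void __exit mathdemo_exit(void)
{
}

module_init(mathdemo_init);
module_exit(mathdemo_exit);
MODULE_LICENSE("GPL");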

View file

@@ -3,7 +3,7 @@
*
* Copyright (©) 1999 Zentropic Computing, All rights reserved
*
* Authors: Trevor Woolven (trevw@zentropix.com)
*
* Original date: Thu 15 Jul 1999
*
@@ -44,9 +44,9 @@
#define MQ_MAX_MSG_PRIORITY MQ_PRIO_MAX /* Highest priority message */
#define MAX_PQUEUES 4 /* Maximum number of message queues in module,
remember to update rtai_pmq.h too. */
#define MAX_MSGSIZE 50 /* Maximum message size per queue (bytes) */
#define MAX_MSGS 10 /* Maximum number of messages per queue */
#define O_NOTIFY_NP 0x1000

View file

@@ -1,6 +1,6 @@
/*
* Copyright (C) 2002 POSEIDON CONTROLS INC <pcloutier@poseidoncontrols.com>
* Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -46,8 +46,8 @@ struct proxy_t {
char *msg;
};
#define SYNCMSG 0
#define PROXY -1
#ifdef __cplusplus
extern "C" {

View file

@@ -69,7 +69,7 @@
*/
NAM2NUM_PROTO(unsigned long, nam2num, (const char *name))
{
unsigned long retval = 0;
int c, i;
for (i = 0; i < 6; i++) {
@@ -102,12 +102,12 @@ NAM2NUM_PROTO(unsigned long, nam2num, (const char *name))
*/
NAM2NUM_PROTO(void, num2nam, (unsigned long num, char *name))
{
int c, i, k, q;
if (num >= MAX_NAM2NUM) {
strncpy(name, "|null|", 7);
return;
}
i = 5;
num -= 2;
while (num && i >= 0) {
q = num/40;
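nam2num() packs up to six characters from the RTAI naming alphabet into an unsigned long, and num2nam() reverses the mapping into a caller-supplied buffer of at least seven bytes. A quick round-trip sketch, assuming the usual <rtai_nam2num.h> header in user space:

#include <stdio.h>
#include <rtai_nam2num.h>

int main(void)
{
        char back[7];
        unsigned long id = nam2num("MYTASK");

        num2nam(id, back);
        printf("MYTASK -> %lu -> %s\n", id, back);
        return 0;
}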

View file

@@ -508,18 +508,18 @@ static inline RT_TASK *RT_rpc_timed(unsigned long node, int port, RT_TASK *task,
static inline int RT_isrpc(unsigned long node, int port, RT_TASK *task)
{
if (node) {
struct { RT_TASK *task; } arg = { task };
return rt_net_rpc(PACKPORT(port, NET_RPC_EXT, ISRPC, 0), 0, &arg, SIZARG, 1, PARTYPES1(VADR)).i[LOW];
}
return rt_isrpc(task);
}
static inline RT_TASK *RT_return(unsigned long node, int port, RT_TASK *task, unsigned long result)
{
if (!task || !node) {
return rt_return(task, result);
}
return rt_return(rt_find_asgn_stub(OWNER(node, task), 1), result) ? task : 0;
}
@@ -840,7 +840,7 @@ static inline int rt_waiting_return(unsigned long node, int port)
static inline int rt_sync_net_rpc(unsigned long node, int port)
{
if (node) {
struct { long dummy; } arg = { 0 };
struct { unsigned long fun; long type; void *args; long argsize; long space; unsigned long partypes; } args = { PACKPORT(abs(port), NET_RPC_EXT, SYNC_NET_RPC, 0), 0, &arg, SIZARG, 0, PARTYPES1(UINT) };
return rtai_lxrt(NET_RPC_IDX, SIZARGS, NETRPC, &args).i[LOW];
}
@@ -1118,7 +1118,7 @@ static inline RT_TASK *RT_send_timed(unsigned long node, int port, RT_TASK *task
static inline RT_TASK *RT_evdrp(unsigned long node, int port, RT_TASK *task, void *msg)
{
if (!task || !node) {
return rt_evdrp(task, msg);
}
return rt_evdrp(rt_find_asgn_stub(OWNER(node, task), 1), msg) ? task : 0;
@@ -1126,7 +1126,7 @@ static inline RT_TASK *RT_evdrp(unsigned long node, int port, RT_TASK *task, voi
static inline RT_TASK *RT_receive(unsigned long node, int port, RT_TASK *task, void *msg)
{
if (!task || !node) {
return rt_receive(task, msg);
}
return rt_receive(rt_find_asgn_stub(OWNER(node, task), 1), msg) ? task : 0;
@@ -1134,7 +1134,7 @@ static inline RT_TASK *RT_receive(unsigned long node, int port, RT_TASK *task, v
static inline RT_TASK *RT_receive_if(unsigned long node, int port, RT_TASK *task, void *msg)
{
if (!task || !node) {
return rt_receive_if(task, msg);
}
return rt_receive_if(rt_find_asgn_stub(OWNER(node, task), 1), msg) ? task : 0;
@@ -1142,7 +1142,7 @@ static inline RT_TASK *RT_receive_if(unsigned long node, int port, RT_TASK *task
static inline RT_TASK *RT_receive_until(unsigned long node, int port, RT_TASK *task, void *msg, RTIME time)
{
if (!task || !node) {
return rt_receive_until(task, msg, nano2count(time));
}
return rt_receive_until(rt_find_asgn_stub(OWNER(node, task), 1), msg, nano2count(time)) ? task : 0;
@@ -1150,7 +1150,7 @@ static inline RT_TASK *RT_receive_until(unsigned long node, int port, RT_TASK *t
static inline RT_TASK *RT_receive_timed(unsigned long node, int port, RT_TASK *task, void *msg, RTIME delay)
{
if (!task || !node) {
return rt_receive_timed(task, msg, nano2count(delay));
}
return rt_receive_timed(rt_find_asgn_stub(OWNER(node, task), 1), msg, nano2count(delay)) ? task : 0;
@@ -1209,7 +1209,7 @@ static inline int RT_isrpc(unsigned long node, int port, RT_TASK *task)
static inline RT_TASK *RT_return(unsigned long node, int port, RT_TASK *task, unsigned long result)
{
if (!task || !node) {
return rt_return(task, result);
}
return rt_return(rt_find_asgn_stub(OWNER(node, task), 1), result) ? task : 0;
@@ -1299,7 +1299,7 @@ static inline RT_TASK *RT_sendx_timed(unsigned long node, int port, RT_TASK *tas
static inline RT_TASK *RT_returnx(unsigned long node, int port, RT_TASK *task, void *msg, int size)
{
if (!task || !node) {
return rt_returnx(task, msg, size);
}
return rt_returnx(rt_find_asgn_stub(OWNER(node, task), 1), msg, size) ? task : 0;
@@ -1307,7 +1307,7 @@ static inline RT_TASK *RT_returnx(unsigned long node, int port, RT_TASK *task, v
static inline RT_TASK *RT_evdrpx(unsigned long node, int port, RT_TASK *task, void *msg, int size, long *len)
{
if (!task || !node) {
return rt_evdrpx(task, msg, size, len);
}
return rt_evdrpx(rt_find_asgn_stub(OWNER(node, task), 1), msg, size, len) ? task : 0;
@@ -1315,7 +1315,7 @@ static inline RT_TASK *RT_evdrpx(unsigned long node, int port, RT_TASK *task, vo
static inline RT_TASK *RT_receivex(unsigned long node, int port, RT_TASK *task, void *msg, int size, long *len)
{
if (!task || !node) {
return rt_receivex(task, msg, size, len);
}
return rt_receivex(rt_find_asgn_stub(OWNER(node, task), 1), msg, size, len) ? task : 0;
@@ -1323,7 +1323,7 @@ static inline RT_TASK *RT_receivex(unsigned long node, int port, RT_TASK *task,
static inline RT_TASK *RT_receivex_if(unsigned long node, int port, RT_TASK *task, void *msg, int size, long *len)
{
if (!task || !node) {
return rt_receivex_if(task, msg, size, len);
}
return rt_receivex_if(rt_find_asgn_stub(OWNER(node, task), 1), msg, size, len) ? task : 0;
@@ -1331,7 +1331,7 @@ static inline RT_TASK *RT_receivex_if(unsigned long node, int port, RT_TASK *tas
static inline RT_TASK *RT_receivex_until(unsigned long node, int port, RT_TASK *task, void *msg, int size, long *len, RTIME time)
{
if (!task || !node) {
return rt_receivex_until(task, msg, size, len, nano2count(time));
}
return rt_receivex_until(rt_find_asgn_stub(OWNER(node, task), 1), msg, size, len, nano2count(time)) ? task : 0;
@@ -1339,7 +1339,7 @@ static inline RT_TASK *RT_receivex_until(unsigned long node, int port, RT_TASK *
static inline RT_TASK *RT_receivex_timed(unsigned long node, int port, RT_TASK *task, void *msg, int size, long *len, RTIME delay)
{
if (!task || !node) {
return rt_receivex_timed(task, msg, size, len, nano2count(delay));
}
return rt_receivex_timed(rt_find_asgn_stub(OWNER(node, task), 1), msg, size, len, nano2count(delay)) ? task : 0;
@@ -1553,19 +1553,19 @@ static inline int rt_get_net_rpc_ret(MBX *mbx, unsigned long long *retval, void
#define RT_isrpcx(task) RT_isrpc(task)
#define RT_waiting_return rt_waiting_return
#define RT_sync_net_rpc rt_sync_net_rpc
#define RT_request_port rt_request_port
#define RT_request_port_id rt_request_port_id
#define RT_request_port_mbx rt_request_port_mbx
#define RT_request_port_id_mbx rt_request_port_id_mbx
#define RT_request_soft_port rt_request_soft_port
#define RT_request_soft_port_id rt_request_soft_port_id
@@ -1573,7 +1573,7 @@ static inline int rt_get_net_rpc_ret(MBX *mbx, unsigned long long *retval, void
#define RT_request_soft_port_id_mbx rt_request_soft_port_id_mbx
#define RT_request_hard_port rt_request_hard_port
#define RT_request_hard_port_id rt_request_hard_port_id
@@ -1581,13 +1581,13 @@ static inline int rt_get_net_rpc_ret(MBX *mbx, unsigned long long *retval, void
#define RT_request_hard_port_id_mbx rt_request_hard_port_id_mbx
#define RT_release_port rt_release_port
#define rt_request_port rt_request_soft_port
#define rt_request_port_id rt_request_soft_port_id
#define rt_request_port_mbx rt_request_soft_port_mbx
#define rt_request_port_id_mbx rt_request_soft_port_id_mbx

View file

@@ -3,7 +3,7 @@
*
* Copyright (©) 1999 Zentropic Computing, All rights reserved
*
* Authors: Trevor Woolven (trevw@zentropix.com)
*
* Original date: Thu 15 Jul 1999
*
@@ -44,9 +44,9 @@
#define MQ_MAX_MSG_PRIORITY MQ_PRIO_MAX /* Highest priority message */
#define MAX_PQUEUES 4 /* Maximum number of message queues in module,
remember to update rtai_mq.h too. */
#define MAX_MSGSIZE 50 /* Maximum message size per queue (bytes) */
#define MAX_MSGS 10 /* Maximum number of messages per queue */
#define O_NOTIFY_NP 0x1000

View file

@@ -20,70 +20,70 @@
#ifndef _RTAI_POSIX_H_
#define _RTAI_POSIX_H_
#define sem_open_rt sem_open
#define sem_close_rt sem_close
#define sem_init_rt sem_init
#define sem_destroy_rt sem_destroy
#define sem_wait_rt sem_wait
#define sem_trywait_rt sem_trywait
#define sem_timedwait_rt sem_timedwait
#define sem_post_rt sem_post
#define sem_getvalue_rt sem_getvalue
#define pthread_mutex_open_rt pthread_mutex_open
#define pthread_mutex_close_rt pthread_mutex_close
#define pthread_mutex_init_rt pthread_mutex_init
#define pthread_mutex_destroy_rt pthread_mutex_destroy
#define pthread_mutex_lock_rt pthread_mutex_lock
#define pthread_mutex_timedlock_rt pthread_mutex_timedlock
#define pthread_mutex_trylock_rt pthread_mutex_trylock
#define pthread_mutex_unlock_rt pthread_mutex_unlock
#define pthread_cond_open_rt pthread_cond_open
#define pthread_cond_close_rt pthread_cond_close
#define pthread_cond_init_rt pthread_cond_init
#define pthread_cond_destroy_rt pthread_cond_destroy
#define pthread_cond_signal_rt pthread_cond_signal
#define pthread_cond_broadcast_rt pthread_cond_broadcast
#define pthread_cond_wait_rt pthread_cond_wait
#define pthread_cond_timedwait_rt pthread_cond_timedwait
#define pthread_barrier_open_rt pthread_barrier_open
#define pthread_barrier_close_rt pthread_barrier_close
#define pthread_barrier_init_rt pthread_barrier_init
#define pthread_barrier_destroy_rt pthread_barrier_destroy
#define pthread_barrier_wait_rt pthread_barrier_wait
#define pthread_rwlock_open_rt pthread_rwlock_open
#define pthread_rwlock_close_rt pthread_rwlock_close
#define pthread_rwlock_init_rt pthread_rwlock_init
#define pthread_rwlock_destroy_rt pthread_rwlock_destroy
#define pthread_rwlock_rdlock_rt pthread_rwlock_rdlock
#define pthread_rwlock_tryrdlock_rt pthread_rwlock_tryrdlock
#define pthread_rwlock_timedrdlock_rt pthread_rwlock_timedrdlock
#define pthread_rwlock_wrlock_rt pthread_rwlock_wrlock
#define pthread_rwlock_trywrlock_rt pthread_rwlock_trywrlock
#define pthread_rwlock_timedwrlock_rt pthread_rwlock_timedwrlock
#define pthread_rwlock_unlock_rt pthread_rwlock_unlock
#define pthread_spin_init_rt pthread_spin_init
#define pthread_spin_destroy_rt pthread_spin_destroy
#define pthread_spin_lock_rt pthread_spin_lock
#define pthread_spin_trylock_rt pthread_spin_trylock
#define pthread_spin_unlock_rt pthread_spin_unlock
#define sched_get_max_priority_rt sched_get_max_priority
#define sched_get_min_priority_rt sched_get_min_priority
#define pthread_create_rt pthread_create
#define pthread_yield_rt pthread_yield
#define pthread_exit_rt pthread_exit
#define pthread_join_rt pthread_join
#define pthread_cancel_rt pthread_cancel
#define pthread_equal_rt pthread_equal
#define pthread_self_rt pthread_self
#define pthread_attr_init_rt pthread_attr_init
#define pthread_attr_destroy_rt pthread_attr_destroy
#define pthread_attr_setschedparam_rt pthread_attr_setschedparam
#define pthread_attr_getschedparam_rt pthread_attr_getschedparam
#define pthread_attr_setschedpolicy_rt pthread_attr_setschedpolicy
@@ -92,15 +92,15 @@
#define pthread_attr_getschedrr_rt pthread_attr_getschedrr
#define pthread_attr_setstacksize_rt pthread_attr_setstacksize
#define pthread_attr_getstacksize_rt pthread_attr_getstacksize
#define pthread_attr_setstack_rt pthread_attr_setstack
#define pthread_attr_getstack_rt pthread_attr_getstack
#define pthread_testcancel_rt pthread_testcancel
#define clock_gettime_rt clock_gettime
#define nanosleep_rt nanosleep
#define pthread_cleanup_push_rt pthread_cleanup_push
#define pthread_cleanup_pop_rt pthread_cleanup_pop
/*
* _RT DO NOTHING FUNCTIONS
@@ -502,7 +502,7 @@ static inline int pthread_condattr_destroy(pthread_condattr_t *attr)
static inline int pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared)
{
*pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
return 0;
}
static inline int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
@@ -529,11 +529,11 @@ static inline int pthread_condattr_setclock(pthread_condattr_t *condattr, clocki
static inline int pthread_condattr_getclock(pthread_condattr_t *condattr, clockid_t *clockid)
{
if (clockid) {
*clockid = ((int *)condattr)[0];
return 0;
}
return -EINVAL;
}
/*
@@ -659,23 +659,23 @@ static inline int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
static inline int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr, int *pshared)
{
*pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
return 0;
return 0;
}
static inline int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{
if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
if (pshared == PTHREAD_PROCESS_PRIVATE) {
((long *)attr)[0] &= ~RTAI_MUTEX_PSHARED;
} else {
((long *)attr)[0] |= RTAI_MUTEX_PSHARED;
}
return 0;
}
return -EINVAL;
}
static inline int pthread_rwlockattr_getkind_np(const pthread_rwlockattr_t *attr, int *pref)
@@ -802,7 +802,7 @@ static inline pthread_t pthread_self(void)
static inline int pthread_attr_init(pthread_attr_t *attr)
{
attr->stacksize = STACK_SIZE;
attr->policy = SCHED_FIFO;
attr->rr_quantum_ns = RR_QUANTUM_NS;
attr->priority = 1;
return 0;
@@ -987,24 +987,24 @@ static inline int clock_nanosleep(int clockid, int flags, const struct timespec
}
return -EINTR;
}
return 0;
}
static inline int nanosleep(const struct timespec *rqtp, struct timespec *rmtp)
{
RTIME expire;
if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec <
0) {
return -EINVAL;
}
rt_sleep_until(expire = rt_get_time() + timespec2count(rqtp));
if ((expire -= rt_get_time()) > 0) {
if (rmtp) {
count2timespec(expire, rmtp);
}
return -EINTR;
}
return 0;
}
/*
@@ -1530,15 +1530,15 @@ RTAI_PROTO(int, __wrap_pthread_mutexattr_gettype, (const pthread_mutexattr_t *at
RTAI_PROTO(int, pthread_make_periodic_np, (pthread_t thread, struct timespec *start_delay, struct timespec *period))
{
struct { RT_TASK *task; RTIME start_time, period; } arg = { NULL, start_delay->tv_sec*1000000000LL + start_delay->tv_nsec, period->tv_sec*1000000000LL + period->tv_nsec };
int retval;
return !(retval = rtai_lxrt(BIDX, SIZARG, MAKE_PERIODIC_NS, &arg).i[LOW]) ? 0 : retval == RTE_UNBLKD ? EINTR : ETIMEDOUT;
}
RTAI_PROTO(int, pthread_wait_period_np, (void))
{
struct { unsigned long dummy; } arg;
return rtai_lxrt(BIDX, SIZARG, WAIT_PERIOD, &arg).i[LOW];
}
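pthread_make_periodic_np() converts the two timespecs to nanoseconds and issues MAKE_PERIODIC_NS, while pthread_wait_period_np() simply returns the WAIT_PERIOD result, so a periodic loop can look like the following hedged sketch; the 1 ms values are only examples and the usual RTAI task/scheduler setup is assumed to be in place.

#include <pthread.h>
#include <rtai_posix.h>

static void *cyclic_thread(void *arg)
{
        struct timespec start = { 0, 1000000 };         /* first release after 1 ms */
        struct timespec period = { 0, 1000000 };        /* 1 ms period */

        pthread_make_periodic_np(pthread_self(), &start, &period);
        for (;;) {
                if (pthread_wait_period_np()) {
                        break;  /* nonzero signals an overrun or an unblocking condition */
                }
                /* ... one cycle of periodic work ... */
        }
        return NULL;
}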
/* /*
@ -1648,7 +1648,7 @@ RTAI_PROTO(int, __wrap_pthread_condattr_destroy, (pthread_condattr_t *attr))
RTAI_PROTO(int, __wrap_pthread_condattr_getpshared, (const pthread_condattr_t *attr, int *pshared)) RTAI_PROTO(int, __wrap_pthread_condattr_getpshared, (const pthread_condattr_t *attr, int *pshared))
{ {
*pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE; *pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
return 0; return 0;
} }
RTAI_PROTO(int, __wrap_pthread_condattr_setpshared, (pthread_condattr_t *attr, int pshared)) RTAI_PROTO(int, __wrap_pthread_condattr_setpshared, (pthread_condattr_t *attr, int pshared))
@ -1679,11 +1679,11 @@ RTAI_PROTO(int, __wrap_pthread_condattr_setclock, (pthread_condattr_t *condattr,
RTAI_PROTO(int, __wrap_pthread_condattr_getclock, (pthread_condattr_t *condattr, clockid_t *clockid)) RTAI_PROTO(int, __wrap_pthread_condattr_getclock, (pthread_condattr_t *condattr, clockid_t *clockid))
{ {
if (clockid) { if (clockid) {
*clockid = ((int *)condattr)[0]; *clockid = ((int *)condattr)[0];
return 0; return 0;
} }
return EINVAL; return EINVAL;
} }
/* /*
@ -1694,7 +1694,7 @@ RTAI_PROTO(int, __wrap_pthread_rwlock_init, (pthread_rwlock_t *rwlock, pthread_r
{ {
struct { unsigned long name; long type; } arg = { rt_get_name(0), RESEM_CHEKWT }; struct { unsigned long name; long type; } arg = { rt_get_name(0), RESEM_CHEKWT };
((pthread_rwlock_t **)rwlock)[0] = (pthread_rwlock_t *)rtai_lxrt(BIDX, SIZARG, LXRT_RWL_INIT, &arg).v[LOW]; ((pthread_rwlock_t **)rwlock)[0] = (pthread_rwlock_t *)rtai_lxrt(BIDX, SIZARG, LXRT_RWL_INIT, &arg).v[LOW];
return 0; return 0;
} }
RTAI_PROTO(int, __wrap_pthread_rwlock_destroy, (pthread_rwlock_t *rwlock)) RTAI_PROTO(int, __wrap_pthread_rwlock_destroy, (pthread_rwlock_t *rwlock))
@ -1786,23 +1786,23 @@ RTAI_PROTO(int, __wrap_pthread_rwlockattr_destroy, (pthread_rwlockattr_t *attr))
RTAI_PROTO(int, __wrap_pthread_rwlockattr_getpshared, (const pthread_rwlockattr_t *attr, int *pshared)) RTAI_PROTO(int, __wrap_pthread_rwlockattr_getpshared, (const pthread_rwlockattr_t *attr, int *pshared))
{ {
*pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE; *pshared = (((long *)attr)[0] & RTAI_MUTEX_PSHARED) != 0 ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
return 0; return 0;
return 0; return 0;
} }
RTAI_PROTO(int, __wrap_pthread_rwlockattr_setpshared, (pthread_rwlockattr_t *attr, int pshared)) RTAI_PROTO(int, __wrap_pthread_rwlockattr_setpshared, (pthread_rwlockattr_t *attr, int pshared))
{ {
if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
if (pshared == PTHREAD_PROCESS_PRIVATE) {
((long *)attr)[0] &= ~RTAI_MUTEX_PSHARED;
} else {
((long *)attr)[0] |= RTAI_MUTEX_PSHARED;
}
return 0;
}
return EINVAL;
}
RTAI_PROTO(int, __wrap_pthread_rwlockattr_getkind_np, (const pthread_rwlockattr_t *attr, int *pref))
@@ -1930,8 +1930,8 @@ RTAI_PROTO(void, pthread_soft_real_time_np, (void))
RTAI_PROTO(int, pthread_gettid_np, (void))
{
struct { unsigned long dummy; } arg;
return rtai_lxrt(BIDX, SIZARG, RT_GETTID, &arg).i[LOW];
}
#define PTHREAD_SOFT_REAL_TIME PTHREAD_SOFT_REAL_TIME_NP
@@ -1966,7 +1966,7 @@ struct local_pthread_args_struct { void *(*start_routine)(void *); void *arg; in
static void *support_thread_fun(struct local_pthread_args_struct *args)
{
RT_TASK *task;
void *(*start_routine)(void *) = args->start_routine;
void *arg = args->arg;
pthread_t thread;
@@ -2141,8 +2141,8 @@ RTAI_PROTO(int, __wrap_pthread_spin_unlock,(pthread_spinlock_t *lock))
#else
static inline int _pthread_gettid_np(void)
{
struct { unsigned long dummy; } arg;
return rtai_lxrt(BIDX, SIZARG, RT_GETTID, &arg).i[LOW];
}
RTAI_PROTO(int, __wrap_pthread_spin_init, (pthread_spinlock_t *lock, int pshared))
@@ -2244,7 +2244,7 @@ RTAI_PROTO(int, __wrap_clock_nanosleep,(clockid_t clockid, int flags, const stru
if (clockid != CLOCK_MONOTONIC && clockid != CLOCK_REALTIME) {
return ENOTSUP;
}
if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec < 0) {
return EINVAL;
@@ -2258,7 +2258,7 @@ RTAI_PROTO(int, __wrap_clock_nanosleep,(clockid_t clockid, int flags, const stru
expire += rt_get_tscnt();
}
ret = rt_sleep_until(expire);
expire -= rt_get_tscnt();
} else {
if (flags != TIMER_ABSTIME) {
expire += rt_get_real_time();
@@ -2280,7 +2280,7 @@ RTAI_PROTO(int, __wrap_clock_nanosleep,(clockid_t clockid, int flags, const stru
RTAI_PROTO(int, __wrap_nanosleep,(const struct timespec *rqtp, struct timespec *rmtp))
{
int canc_type, ret;
RTIME expire;
if (rqtp->tv_nsec >= 1000000000L || rqtp->tv_nsec < 0 || rqtp->tv_sec < 0) {
return -EINVAL;
@@ -2299,7 +2299,7 @@ RTAI_PROTO(int, __wrap_nanosleep,(const struct timespec *rqtp, struct timespec *
pthread_setcanceltype(canc_type, NULL);
return 0;
}
/*
@@ -2435,10 +2435,10 @@ RTAI_PROTO (int, __wrap_timer_delete, (timer_t timerid))
* GOING HARD REAL TIME
*/
#define pthread_self_rt pthread_self
#define pthread_equal_rt pthread_equal
#define pthread_attr_init_rt pthread_attr_init
#define pthread_attr_destroy_rt pthread_attr_destroy
#define pthread_attr_getdetachstate_rt pthread_attr_getdetachstate
#define pthread_attr_setschedpolicy_rt pthread_attr_setschedpolicy
#define pthread_attr_getschedpolicy_rt pthread_attr_getschedpolicy
@@ -2446,8 +2446,8 @@ RTAI_PROTO (int, __wrap_timer_delete, (timer_t timerid))
#define pthread_attr_getschedparam_rt pthread_attr_getschedparam
#define pthread_attr_setinheritsched_rt pthread_attr_setinheritsched
#define pthread_attr_getinheritsched_rt pthread_attr_getinheritsched
#define pthread_attr_setscope_rt pthread_attr_setscope
#define pthread_attr_getscope_rt pthread_attr_getscope
#ifdef __USE_UNIX98
#define pthread_attr_setguardsize_rt pthread_attr_setguardsize
#define pthread_attr_getguardsize_rt pthread_attr_getguardsize
@@ -2455,8 +2455,8 @@ RTAI_PROTO (int, __wrap_timer_delete, (timer_t timerid))
#define pthread_attr_setstackaddr_rt pthread_attr_setstackaddr
#define pthread_attr_getstackaddr_rt pthread_attr_getstackaddr
#ifdef __USE_XOPEN2K
#define pthread_attr_setstack_rt pthread_attr_setstack
#define pthread_attr_getstack_rt pthread_attr_getstack
#endif
#define pthread_attr_setstacksize_rt pthread_attr_setstacksize
#define pthread_attr_getstacksize_rt pthread_attr_getstacksize
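The wrappers in this file all follow the same marshalling pattern: the user-space arguments are packed into an anonymous struct and handed to rtai_lxrt() together with a request code, and the result is picked out of the returned union. A minimal sketch of that pattern, where MY_REQUEST and my_wrapper are hypothetical names used only for illustration (BIDX, SIZARG, LOW and rtai_lxrt() come from rtai_lxrt.h):

/* Hedged sketch of an LXRT user-space wrapper; not part of the diff. */
RTAI_PROTO(int, my_wrapper, (unsigned long name, long value))
{
	struct { unsigned long name; long value; } arg = { name, value };
	/* The struct is passed to the kernel side; .i[LOW] extracts the int result. */
	return rtai_lxrt(BIDX, SIZARG, MY_REQUEST, &arg).i[LOW];
}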

@@ -80,51 +80,51 @@ static inline void *CREATE_PROC_ENTRY(const char *name, umode_t mode, void *parent
// this macro so we can add variables with out
// changing the users of this macro, of course
// only when the names don't colide!
#define PROC_PRINT_VARS \
off_t pos = 0; \
off_t begin = 0; \
int len = 0 /* no ";" */
// macro that prints in the procfs read buffer.
// this macro expects the function arguments to be
// named as follows.
// static int FOO(char *page, char **start,
// off_t off, int count, int *eof, void *data)
#define PROC_PRINT(fmt,args...) \
do { \
len += sprintf(page + len , fmt, ##args); \
pos += len; \
if(pos < off) { \
len = 0; \
begin = pos; \
} \
if(pos > off + count) \
goto done; \
} while(0)
// macro to leave the read function for a other
// place than at the end.
#define PROC_PRINT_RETURN \
do { \
*eof = 1; \
goto done; \
} while(0)
// macro that should only used ones at the end of the
// read function, to return from a other place in the
// read function use the PROC_PRINT_RETURN macro.
#define PROC_PRINT_DONE \
do { \
*eof = 1; \
done: \
*start = page + (off - begin); \
len -= (off - begin); \
if(len > count) \
len = count; \
if(len < 0) \
len = 0; \
return len; \
} while(0)
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(3,10,0) */
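These macros assume the legacy procfs read callback signature named in the comments above. A minimal sketch of a read handler built on them (the "foo_read" name and the printed fields are made up for illustration):

/* Hedged sketch of a legacy procfs read callback using the macros above. */
static int foo_read(char *page, char **start, off_t off, int count,
                    int *eof, void *data)
{
	PROC_PRINT_VARS;              /* declares pos, begin, len */
	PROC_PRINT("foo status\n");
	PROC_PRINT("value: %d\n", 42);
	PROC_PRINT_DONE;              /* sets *eof, clamps len, returns it */
}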

@@ -1,6 +1,6 @@
/*
* Copyright (C) 2000 Pierre Cloutier <pcloutier@poseidoncontrols.com>
* Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as

@@ -6,7 +6,7 @@
*
* @note Copyright &copy; 1999 Paolo Mantegazza <mantegazza@aero.polimi.it>,
* extensions for user space modules are jointly copyrighted (2000) with:
* Pierre Cloutier <pcloutier@poseidoncontrols.com>,
* Steve Papacharalambous <stevep@zentropix.com>.
*
* This program is free software; you can redistribute it and/or
@@ -36,22 +36,20 @@ struct rt_registry_entry {
void *adr; // Physical rt memory address of resource
struct task_struct *tsk; // Linux task owner of the resource
int type; // Type of resource
unsigned short count; // Usage registry
unsigned short alink;
unsigned short nlink;
};
#define MAX_SLOTS CONFIG_RTAI_SCHED_LXRT_NUMSLOTS // Max number of registered objects
#define IS_TASK 0 // Used to identify registered resources
#define IS_SEM 1
#define IS_RWL 2
#define IS_SPL 3
#define IS_MBX 4
#define IS_PRX 5
#define IS_BIT 6
#define IS_HPCK 6
#define IS_TBX 7
#define IS_HPCK 8
#ifdef __KERNEL__

@@ -79,7 +79,7 @@ extern "C" {
RTAI_PROTO(struct rtai_rwlock *, rt_typed_rwl_init,(unsigned long name, int type))
{
struct { unsigned long name; long type; } arg = { name, type };
return (struct rtai_rwlock *)rtai_lxrt(BIDX, SIZARG, LXRT_RWL_INIT, &arg).v[LOW];
}

@@ -22,7 +22,7 @@
#if !( __GNUC__ == 2 && __GNUC_MINOR__ > 8 && __GNUC_MINOR__ < 96 ) && \
__GNUC__ > 4
#warning: You are likely using an unsupported GCC version! \
Please read GCC-WARNINGS carefully.
#endif
#endif /* !_RTAI_SANITY_H */

@@ -37,13 +37,13 @@
#define RT_RESEM_SUSPDEL (-0x7fffFfff)
#define RT_SCHED_READY 1
#define RT_SCHED_SUSPENDED 2
#define RT_SCHED_DELAYED 4
#define RT_SCHED_SEMAPHORE 8
#define RT_SCHED_SEND 16
#define RT_SCHED_RECEIVE 32
#define RT_SCHED_RPC 64
#define RT_SCHED_RETURN 128
#define RT_SCHED_MBXSUSP 256
#define RT_SCHED_SFTRDY 512
@@ -110,7 +110,7 @@
#define rt_is_reterr(i) (i >= RTE_LOWERR)
#define RT_IRQ_TASK 0
#define RT_IRQ_TASKLET 1
#define RT_IRQ_TASK_ERR 0x7FFFFFFF
@@ -270,13 +270,13 @@ int rt_kthread_init(struct rt_task_struct *task,
void(*signal)(void));
int rt_kthread_init_cpuid(struct rt_task_struct *task,
void (*rt_thread)(long),
long data,
int stack_size,
int priority,
int uses_fpu,
void(*signal)(void),
unsigned run_on_cpu);
RTAI_SYSCALL_MODE void rt_set_runnable_on_cpus(struct rt_task_struct *task,
unsigned long cpu_mask);
@@ -459,33 +459,33 @@ RT_TRAP_HANDLER rt_set_task_trap_handler(struct rt_task_struct *task,
static inline RTIME timeval2count(struct timeval *t)
{
return nano2count(t->tv_sec*1000000000LL + t->tv_usec*1000);
}
static inline void count2timeval(RTIME rt, struct timeval *t)
{
t->tv_sec = rtai_ulldiv(count2nano(rt), 1000000000, (unsigned long *)&t->tv_usec);
t->tv_usec /= 1000;
}
static inline RTIME timespec2count(const struct timespec *t)
{
return nano2count(t->tv_sec*1000000000LL + t->tv_nsec);
}
static inline void count2timespec(RTIME rt, struct timespec *t)
{
t->tv_sec = rtai_ulldiv(count2nano(rt), 1000000000, (unsigned long *)&t->tv_nsec);
}
static inline RTIME timespec2nanos(const struct timespec *t)
{
return t->tv_sec*1000000000LL + t->tv_nsec;
}
static inline void nanos2timespec(RTIME rt, struct timespec *t)
{
t->tv_sec = rtai_ulldiv(rt, 1000000000, (unsigned long *)&t->tv_nsec);
}
void rt_make_hard_real_time(RT_TASK *task);
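The conversion helpers above rely on rtai_ulldiv() returning the quotient while storing the remainder. A plain user-space sketch of the same nanoseconds/timespec arithmetic, written with standard C division only (no RTAI headers), behaves like this:

/* Hedged illustration of the nanos <-> timespec arithmetic used above. */
#include <assert.h>
#include <stdint.h>
#include <time.h>

static void nanos2ts(int64_t ns, struct timespec *t)
{
	t->tv_sec  = ns / 1000000000LL;   /* quotient  -> seconds */
	t->tv_nsec = ns % 1000000000LL;   /* remainder -> nanoseconds */
}

static int64_t ts2nanos(const struct timespec *t)
{
	return t->tv_sec * 1000000000LL + t->tv_nsec;
}

int main(void)
{
	struct timespec ts;
	nanos2ts(1500000000LL, &ts);      /* 1.5 s */
	assert(ts.tv_sec == 1 && ts.tv_nsec == 500000000L);
	assert(ts2nanos(&ts) == 1500000000LL);
	return 0;
}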

@@ -47,9 +47,7 @@
#include <rtai_scb.h>
#include <rtai_mbx.h>
#include <rtai_msg.h>
#include <rtai_tbx.h>
#include <rtai_mq.h>
#include <rtai_bits.h>
#include <rtai_tasklets.h>
#include <rtai_fifos.h>
#include <rtai_netrpc.h>
@@ -108,14 +106,14 @@ extern volatile int rt_sched_timed;
#ifdef CONFIG_RTAI_MALLOC_BUILTIN
#define sched_mem_init() \
{ if(__rtai_heap_init() != 0) { \
return(-ENOMEM); \
} }
#define sched_mem_end() __rtai_heap_exit()
#else /* CONFIG_RTAI_MALLOC_BUILTIN */
#define sched_mem_init()
#define sched_mem_end()
#endif /* !CONFIG_RTAI_MALLOC_BUILTIN */
#define call_exit_handlers(task) __call_exit_handlers(task)
#define set_exit_handler(task, fun, arg1, arg2) __set_exit_handler(task, fun, arg1, arg2)
#else /* !CONFIG_RTAI_MALLOC */
#define sched_mem_init()
@@ -201,7 +199,7 @@ static inline void send_sched_ipi(unsigned long dest)
#define RT_SCHEDULE_MAP(schedmap) rt_schedule()
#define RT_SCHEDULE(task, cpuid) rt_schedule()
#define RT_SCHEDULE_BOTH(task, cpuid) rt_schedule()
@@ -368,8 +366,8 @@ static inline void enq_timed_task(RT_TASK *timed_task)
static inline void rem_timed_task(RT_TASK *task)
{
if ((task->state & RT_SCHED_DELAYED)) {
(task->tprev)->tnext = task->tnext;
(task->tnext)->tprev = task->tprev;
#ifdef CONFIG_SMP
rb_erase_task(task, task->runnable_on_cpus);
#else
@@ -391,16 +389,16 @@ static inline void wake_up_timed_tasks(int cpuid)
if ((task->state & RT_SCHED_SUSPENDED) && task->suspdepth > 0) {
task->suspdepth = 0;
}
if ((task->state &= ~(RT_SCHED_DELAYED | RT_SCHED_SUSPENDED | RT_SCHED_SEMAPHORE | RT_SCHED_RECEIVE | RT_SCHED_SEND | RT_SCHED_RPC | RT_SCHED_RETURN | RT_SCHED_MBXSUSP | RT_SCHED_POLL)) == RT_SCHED_READY) {
if (task->policy < 0) {
enq_ready_edf_task(task);
} else {
enq_ready_task(task);
}
#if defined(CONFIG_RTAI_BUSY_TIME_ALIGN) && CONFIG_RTAI_BUSY_TIME_ALIGN
task->busy_time_align = oneshot_timer;
#endif
}
rb_erase_task(task, cpuid);
task = task->tnext;
} while (task->resume_time <= rt_time_h);
@@ -429,35 +427,35 @@ static inline RTIME get_time(void)
static inline void enqueue_blocked(RT_TASK *task, QUEUE *queue, int qtype)
{
QUEUE *q;
task->blocked_on = (q = queue);
if (!qtype) {
while ((q = q->next) != queue && (q->task)->priority <= task->priority);
}
q->prev = (task->queue.prev = q->prev)->next = &(task->queue);
task->queue.next = q;
}
static inline void dequeue_blocked(RT_TASK *task)
{
task->prio_passed_to = NULL;
(task->queue.prev)->next = task->queue.next;
(task->queue.next)->prev = task->queue.prev;
task->blocked_on = NULL;
}
static inline unsigned long pass_prio(RT_TASK *to, RT_TASK *from)
{
QUEUE *q, *blocked_on;
#ifdef CONFIG_SMP
RT_TASK *rhead;
unsigned long schedmap;
schedmap = 0;
#endif
// from->prio_passed_to = to;
while (to && to->priority > from->priority) {
to->priority = from->priority;
if (to->state == RT_SCHED_READY) {
if ((to->rprev)->priority > to->priority || (to->rnext)->priority < to->priority) {
#ifdef CONFIG_SMP
@@ -487,7 +485,7 @@ static inline unsigned long pass_prio(RT_TASK *to, RT_TASK *from)
}
}
to = (to->state & RT_SCHED_SEMAPHORE) ? ((SEM *)blocked_on)->owndby : blocked_on->task;
}
// to = to->prio_passed_to;
}
#ifdef CONFIG_SMP
@@ -500,14 +498,14 @@ static inline unsigned long pass_prio(RT_TASK *to, RT_TASK *from)
static inline RT_TASK *_rt_whoami(void)
{
#ifdef CONFIG_SMP
RT_TASK *rt_current;
unsigned long flags;
flags = rt_global_save_flags_and_cli();
rt_current = RT_CURRENT;
rt_global_restore_flags(flags);
return rt_current;
#else
return rt_smp_current[0];
#endif
}
@@ -543,9 +541,6 @@ static inline XHDL *__set_exit_handler(RT_TASK *task, void (*fun) (void *, int),
static inline int rtai_init_features (void)
{
#ifdef CONFIG_RTAI_LEDS_BUILTIN
__rtai_leds_init();
#endif /* CONFIG_RTAI_LEDS_BUILTIN */
#ifdef CONFIG_RTAI_SEM_BUILTIN
__rtai_sem_init();
#endif /* CONFIG_RTAI_SEM_BUILTIN */
@@ -555,15 +550,9 @@ static inline int rtai_init_features (void)
#ifdef CONFIG_RTAI_MBX_BUILTIN
__rtai_mbx_init();
#endif /* CONFIG_RTAI_MBX_BUILTIN */
#ifdef CONFIG_RTAI_TBX_BUILTIN
__rtai_msg_queue_init();
#endif /* CONFIG_RTAI_TBX_BUILTIN */
#ifdef CONFIG_RTAI_MQ_BUILTIN
__rtai_mq_init();
#endif /* CONFIG_RTAI_MQ_BUILTIN */
#ifdef CONFIG_RTAI_BITS_BUILTIN
__rtai_bits_init();
#endif /* CONFIG_RTAI_BITS_BUILTIN */
#ifdef CONFIG_RTAI_TASKLETS_BUILTIN
__rtai_tasklets_init();
#endif /* CONFIG_RTAI_TASKLETS_BUILTIN */
@@ -579,9 +568,6 @@ static inline int rtai_init_features (void)
#ifdef CONFIG_RTAI_MATH_BUILTIN
__rtai_math_init();
#endif /* CONFIG_RTAI_MATH_BUILTIN */
#ifdef CONFIG_RTAI_USI
printk(KERN_INFO "RTAI[usi]: enabled.\n");
#endif /* CONFIG_RTAI_USI */
return 0;
}
@@ -603,15 +589,9 @@ static inline void rtai_cleanup_features (void) {
#ifdef CONFIG_RTAI_TASKLETS_BUILTIN
__rtai_tasklets_exit();
#endif /* CONFIG_RTAI_TASKLETS_BUILTIN */
#ifdef CONFIG_RTAI_BITS_BUILTIN
__rtai_bits_exit();
#endif /* CONFIG_RTAI_BITS_BUILTIN */
#ifdef CONFIG_RTAI_MQ_BUILTIN
__rtai_mq_exit();
#endif /* CONFIG_RTAI_MQ_BUILTIN */
#ifdef CONFIG_RTAI_TBX_BUILTIN
__rtai_msg_queue_exit();
#endif /* CONFIG_RTAI_TBX_BUILTIN */
#ifdef CONFIG_RTAI_MBX_BUILTIN
__rtai_mbx_exit();
#endif /* CONFIG_RTAI_MBX_BUILTIN */
@@ -621,9 +601,6 @@ static inline void rtai_cleanup_features (void) {
#ifdef CONFIG_RTAI_SEM_BUILTIN
__rtai_sem_exit();
#endif /* CONFIG_RTAI_SEM_BUILTIN */
#ifdef CONFIG_RTAI_LEDS_BUILTIN
__rtai_leds_exit();
#endif /* CONFIG_RTAI_LEDS_BUILTIN */
}
int rt_check_current_stack(void);
@@ -647,30 +624,6 @@ int rt_kthread_init_cpuid(RT_TASK *task,
#else /* !__KERNEL__ */
#if 0
#include <rtai_version.h>
#include <rtai_lxrt.h>
#include <rtai_sched.h>
#include <rtai_malloc.h>
#include <rtai_trace.h>
#include <rtai_leds.h>
#include <rtai_sem.h>
#include <rtai_rwl.h>
#include <rtai_spl.h>
#include <rtai_scb.h>
#include <rtai_mbx.h>
#include <rtai_msg.h>
#include <rtai_tbx.h>
#include <rtai_mq.h>
#include <rtai_bits.h>
#include <rtai_wd.h>
#include <rtai_tasklets.h>
#include <rtai_fifos.h>
#include <rtai_netrpc.h>
#include <rtai_shm.h>
#include <rtai_usi.h>
#endif
#endif /* __KERNEL__ */
#endif /* !_RTAI_SCHEDCORE_H */

@@ -240,18 +240,18 @@ static inline int rt_pmutex_unlock(pmutex_t *mutex) {
}
#undef rt_mutex_init
#define rt_mutex_init(mtx) rt_typed_sem_init(mtx, 1, RES_SEM)
#define rt_mutex_delete(mtx) rt_sem_delete(mtx)
#define rt_mutex_destroy(mtx) rt_sem_delete(mtx)
#define rt_mutex_trylock(mtx) rt_sem_wait_if(mtx)
#define rt_mutex_lock(mtx) rt_sem_wait(mtx)
#define rt_mutex_timedlock(mtx, time) rt_sem_wait_until(mtx, time)
#define rt_mutex_unlock(mtx) rt_sem_signal(mtx)
#define rt_cond_init(cnd) rt_typed_sem_init(cnd, 0, BIN_SEM | PRIO_Q)
#define rt_cond_delete(cnd) rt_sem_delete(cnd)
#define rt_cond_destroy(cnd) rt_sem_delete(cnd)
#define rt_cond_broadcast(cnd) rt_sem_broadcast(cnd)
static inline int rt_cond_timedwait(CND *cnd, SEM *mtx, RTIME time) {
return rt_cond_wait_until(cnd, mtx, time) < SEM_TIMOUT ? 0 : -1;
@@ -378,10 +378,10 @@ RTAI_PROTO(int, rt_sem_count,(SEM *sem))
* @return a pointer to the condition variable to be used in related calls or 0
* if an error has occured.
*/
#define rt_cond_init(name) rt_typed_sem_init(name, 0, BIN_SEM)
#define rt_cond_delete(cnd) rt_sem_delete(cnd)
#define rt_cond_destroy(cnd) rt_sem_delete(cnd)
#define rt_cond_broadcast(cnd) rt_sem_broadcast(cnd)
#define rt_cond_timedwait(cnd, mtx, time) rt_cond_wait_until(cnd, mtx, time)
RTAI_PROTO(int, rt_cond_signal,(CND *cnd))
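Both the mutex and the condition-variable names above are thin aliases over RTAI semaphores. A hypothetical kernel-side fragment using only the aliases visible in this excerpt (the shared flag, the objects and their initialisation with rt_mutex_init()/rt_cond_init() are assumed, not shown in the diff):

/* Hedged sketch: timed wait on a condition built from the aliases above. */
static SEM mtx;
static CND cnd;
static int shared_ready;

static void consumer_side(RTIME deadline)
{
	rt_mutex_lock(&mtx);
	while (!shared_ready) {
		/* kernel-side rt_cond_timedwait() returns 0 on wakeup, -1 on timeout */
		if (rt_cond_timedwait(&cnd, &mtx, deadline) < 0)
			break;
	}
	rt_mutex_unlock(&mtx);
}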

@@ -490,7 +490,7 @@ RTAI_PROTO(void, rt_named_free, (void *addr))
#define rt_heap_close(name, adr) rt_shm_free(name)
// aliases in use already, different heads different choices
#define rt_heap_init rt_heap_open
#define rt_heap_create rt_heap_open
#define rt_heap_acquire rt_heap_open
#define rt_heap_init_adr rt_heap_open_adr

@@ -78,7 +78,7 @@ RTAI_PROTO(int, rt_spl_delete,(struct rtai_spl *spl))
RTAI_PROTO(struct rtai_spl *, rt_named_spl_init,(const char *name))
{
struct { unsigned long name; } arg = { nam2num(name) };
return (struct rtai_spl *)rtai_lxrt(BIDX, SIZARG, NAMED_SPL_INIT, &arg).v[LOW];
}

@@ -367,7 +367,7 @@ RTAI_PROTO(struct rt_tasklet_struct *, rt_init_tasklet, (void))
if ((arg.thread = rt_thread_create((void *)support_tasklet, &arg.tasklet, TASKLET_STACK_SIZE))) {
int i;
#define POLLS_PER_SEC 100
for (i = 0; i < POLLS_PER_SEC/5 && !arg.done; i++) {
struct timespec delay = { 0, 1000000000/POLLS_PER_SEC };
nanosleep(&delay, NULL);
}

@@ -1,326 +0,0 @@
/*
* Copyright (C) 2005 Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#ifndef _RTAI_RT_MSGQ_H
#define _RTAI_RT_MSGQ_H
#include <linux/version.h>
#include <rtai_sem.h>
#define MSGQ_INIT TBX_INIT
#define MSGQ_DELETE TBX_DELETE
#define NAMED_MSGQ_INIT NAMED_TBX_INIT
#define NAMED_MSGQ_DELETE NAMED_TBX_DELETE
#define MSG_SEND TBX_SEND
#define MSG_SEND_IF TBX_SEND_IF
#define MSG_SEND_UNTIL TBX_SEND_UNTIL
#define MSG_SEND_TIMED TBX_SEND_TIMED
#define MSG_RECEIVE TBX_RECEIVE
#define MSG_RECEIVE_IF TBX_RECEIVE_IF
#define MSG_RECEIVE_UNTIL TBX_RECEIVE_UNTIL
#define MSG_RECEIVE_TIMED TBX_RECEIVE_TIMED
#define MSG_BROADCAST TBX_BROADCAST
#define MSG_BROADCAST_IF TBX_BROADCAST_IF
#define MSG_BROADCAST_UNTIL TBX_BROADCAST_UNTIL
#define MSG_BROADCAST_TIMED TBX_BROADCAST_TIMED
#define MSG_EVDRP TBX_URGENT
#define TBX RT_MSGQ
#ifdef __KERNEL__
typedef struct rt_msgh {
void *malloc;
int broadcast;
int size;
int priority;
void *next;
} RT_MSGH;
#define RT_MSGH_SIZE (sizeof(RT_MSGH))
typedef struct rt_msg {
RT_MSGH hdr;
char msg[1];
} RT_MSG;
typedef struct rt_msgq {
int nmsg;
int fastsize;
int slot;
void **slots;
void *firstmsg;
SEM receivers, senders;
SEM received, freslots;
SEM broadcast;
spinlock_t lock;
} RT_MSGQ;
#include <linux/types.h>
#ifdef __cplusplus
extern "C" {
#endif /* !__cplusplus */
int __rtai_msg_queue_init(void);
void __rtai_msg_queue_exit(void);
RTAI_SYSCALL_MODE int rt_msgq_init(RT_MSGQ *msgq, int nmsg, int msg_size);
RTAI_SYSCALL_MODE int rt_msgq_delete(RT_MSGQ *msgq);
RTAI_SYSCALL_MODE RT_MSGQ *_rt_named_msgq_init(unsigned long msgq_name, int nmsg, int size);
static inline RT_MSGQ *rt_named_msgq_init(const char *msgq_name, int nmsg, int size)
{
return _rt_named_msgq_init(nam2num(msgq_name), nmsg, size);
}
RTAI_SYSCALL_MODE int rt_named_msgq_delete(RT_MSGQ *msgq);
RTAI_SYSCALL_MODE int _rt_msg_send(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, int space);
static inline int rt_msg_send(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri)
{
return _rt_msg_send(msgq, msg, msg_size, msgpri, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_send_if(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, int space);
static inline int rt_msg_send_if(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri)
{
return _rt_msg_send_if(msgq, msg, msg_size, msgpri, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_send_until(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, RTIME until, int space);
static inline int rt_msg_send_until(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, RTIME until)
{
return _rt_msg_send_until(msgq, msg, msg_size, msgpri, until, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_send_timed(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, RTIME delay, int space);
static inline int rt_msg_send_timed(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, RTIME delay)
{
return _rt_msg_send_timed(msgq, msg, msg_size, msgpri, delay, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_receive(RT_MSGQ *msgq, void *msg, int msg_size, int *msgpri, int space);
static inline int rt_msg_receive(RT_MSGQ *msgq, void *msg, int msg_size, int *msgpri)
{
return _rt_msg_receive(msgq, msg, msg_size, msgpri, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_receive_if(RT_MSGQ *msgq, void *msg, int msg_size, int *msgpri, int space);
static inline int rt_msg_receive_if(RT_MSGQ *msgq, void *msg, int msg_size, int *msgpri)
{
return _rt_msg_receive_if(msgq, msg, msg_size, msgpri, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_receive_until(RT_MSGQ *msgq, void *msg, int msg_size, int *msgpri, RTIME until, int space);
static inline int rt_msg_receive_until(RT_MSGQ *msgq, void *msg, int msg_size, int *msgpri, RTIME until)
{
return _rt_msg_receive_until(msgq, msg, msg_size, msgpri, until, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_receive_timed(RT_MSGQ *msgq, void *msg, int msg_size, int *msgpri, RTIME delay, int space);
static inline int rt_msg_receive_timed(RT_MSGQ *msgq, void *msg, int msg_size, int *msgpri, RTIME delay)
{
return _rt_msg_receive_timed(msgq, msg, msg_size, msgpri, delay, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_evdrp(RT_MSGQ *msgq, void *msg, int msg_size, int *msgpri, int space);
static inline int rt_msg_evdrp(RT_MSGQ *msgq, void *msg, int msg_size, int *msgpri)
{
return _rt_msg_evdrp(msgq, msg, msg_size, msgpri, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_broadcast(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, int space);
static inline int rt_msg_broadcast(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri)
{
return _rt_msg_broadcast(msgq, msg, msg_size, msgpri, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_broadcast_if(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, int space);
static inline int rt_msg_broadcast_if(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri)
{
return _rt_msg_broadcast_if(msgq, msg, msg_size, msgpri, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_broadcast_until(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, RTIME until, int space);
static inline int rt_msg_broadcast_until(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, RTIME until)
{
return _rt_msg_broadcast_until(msgq, msg, msg_size, msgpri, until, 1);
}
RTAI_SYSCALL_MODE int _rt_msg_broadcast_timed(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, RTIME delay, int space);
static inline int rt_msg_broadcast_delay(RT_MSGQ *msgq, void *msg, int msg_size, int msgpri, RTIME delay)
{
return _rt_msg_broadcast_until(msgq, msg, msg_size, msgpri, delay, 1);
}
#ifdef __cplusplus
}
#endif /* __cplusplus */
#else /* !__KERNEL__ */
#include <signal.h>
#include <rtai_lxrt.h>
struct rt_msgh;
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
typedef struct rt_msgq {
int dummy;
} RT_MSGQ;
RTAI_PROTO(RT_MSGQ *, rt_msgq_init, (unsigned long msgq, int nmsg, int msg_size))
{
struct { unsigned long msgq; long nmsg; long msg_size; } arg = { msgq, nmsg, msg_size };
return rtai_lxrt(BIDX, SIZARG, NAMED_MSGQ_INIT, &arg).v[LOW];
}
RTAI_PROTO(int, rt_msgq_delete, (RT_MSGQ *msgq))
{
struct { RT_MSGQ *msgq; } arg = { msgq };
return rtai_lxrt(BIDX, SIZARG, NAMED_MSGQ_DELETE, &arg).i[LOW];
}
RTAI_PROTO(RT_MSGQ *, rt_named_msgq_init,(const char *name, int nmsg, int size))
{
struct { unsigned long name; long nmsg; long size; } arg = { nam2num(name), nmsg, size };
return rtai_lxrt(BIDX, SIZARG, NAMED_MSGQ_INIT, &arg).v[LOW];
}
RTAI_PROTO(int, rt_named_msgq_delete, (RT_MSGQ *msgq))
{
struct { RT_MSGQ *msgq; } arg = { msgq };
return rtai_lxrt(BIDX, SIZARG, NAMED_MSGQ_DELETE, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_send, (RT_MSGQ *msgq, void *msg, int msg_size, int msgprio))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; long msgprio; long space; } arg = { msgq, msg, msg_size, msgprio, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_SEND, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_send_if, (RT_MSGQ *msgq, void *msg, int msg_size, int msgprio))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; long msgprio; long space; } arg = { msgq, msg, msg_size, msgprio, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_SEND_IF, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_send_until, (RT_MSGQ *msgq, void *msg, int msg_size, int msgprio, RTIME until))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; long msgprio; RTIME until; long space; } arg = { msgq, msg, msg_size, msgprio, until, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_SEND_UNTIL, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_send_timed, (RT_MSGQ *msgq, void *msg, int msg_size, int msgprio, RTIME delay))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; long msgprio; RTIME delay; long space; } arg = { msgq, msg, msg_size, msgprio, delay, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_SEND_TIMED, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_receive, (RT_MSGQ *msgq, void *msg, int msg_size, int *msgprio))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; int *msgprio; long space; } arg = { msgq, msg, msg_size, msgprio, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_RECEIVE, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_receive_if, (RT_MSGQ *msgq, void *msg, int msg_size, int *msgprio))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; int *msgprio; long space; } arg = { msgq, msg, msg_size, msgprio, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_RECEIVE_IF, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_receive_until, (RT_MSGQ *msgq, void *msg, int msg_size, int *msgprio, RTIME until))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; int *msgprio; RTIME until; long space; } arg = { msgq, msg, msg_size, msgprio, until, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_RECEIVE_UNTIL, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_receive_timed, (RT_MSGQ *msgq, void *msg, int msg_size, int *msgprio, RTIME delay))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; int *msgprio; RTIME delay; long space; } arg = { msgq, msg, msg_size, msgprio, delay, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_RECEIVE_TIMED, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_evdrp, (RT_MSGQ *msgq, void *msg, int msg_size, int *msgprio))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; int *msgprio; long space; } arg = { msgq, msg, msg_size, msgprio, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_EVDRP, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_broadcast, (RT_MSGQ *msgq, void *msg, int msg_size, int msgprio))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; long msgprio; long space; } arg = { msgq, msg, msg_size, msgprio, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_BROADCAST, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_broadcast_if, (RT_MSGQ *msgq, void *msg, int msg_size, int msgprio))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; long msgprio; long space; } arg = { msgq, msg, msg_size, msgprio, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_BROADCAST_IF, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_broadcast_until, (RT_MSGQ *msgq, void *msg, int msg_size, int msgprio, RTIME until))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; long msgprio; RTIME until; long space; } arg = { msgq, msg, msg_size, msgprio, until, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_BROADCAST_UNTIL, &arg).i[LOW];
}
RTAI_PROTO(int, rt_msg_broadcast_timed, (RT_MSGQ *msgq, void *msg, int msg_size, int msgprio, RTIME delay))
{
struct { RT_MSGQ *msgq; void *msg; long msg_size; long msgprio; RTIME delay; long space; } arg = { msgq, msg, msg_size, msgprio, delay, 0 };
return rtai_lxrt(BIDX, SIZARG, MSG_BROADCAST_TIMED, &arg).i[LOW];
}
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __KERNEL__ */
#define rt_tbx_init(tbx, size, flags) rt_msgq_init(tbx, size, 0)
#define rt_tbx_delete(tbx) rt_msgq_delete(tbx)
#define rt_tbx_send(tbx, msg, msg_size) rt_msg_send(tbx, msg, msg_size, 1)
#define rt_tbx_send_if(tbx, msg, msg_size) rt_msg_send_if(tbx, msg, msg_size, 1)
#define rt_tbx_send_until(tbx, msg, msg_size, until) rt_msg_send_until(tbx, msg, msg_size, 1, until)
#define rt_tbx_send_timed(tbx, msg, msg_size, delay) rt_msg_send_timed(tbx, msg, msg_size, 1, delay)
#define rt_tbx_receive(tbx, msg, msg_size) rt_msg_receive(tbx, msg, msg_size, 0)
#define rt_tbx_receive_if(tbx, msg, msg_size) rt_msg_receive_if(tbx, msg, msg_size, 0)
#define rt_tbx_receive_until(tbx, msg, msg_size, until) rt_msg_receive_until(tbx, msg, msg_size, 0, until)
#define rt_tbx_receive_timed(tbx, msg, msg_size, delay) rt_msg_receive_timed(tbx, msg, msg_size, 0, delay)
#define rt_tbx_broadcast(tbx, msg, msg_size) rt_msg_broadcast(tbx, msg, msg_size, 0)
#define rt_tbx_broadcast_if(tbx, msg, msg_size) rt_msg_broadcast_if(tbx, msg, msg_size, 0)
#define rt_tbx_broadcast_until(tbx, msg, msg_size, until) rt_msg_broadcast_until(tbx, msg, msg_size, 0, until)
#define rt_tbx_broadcast_timed(tbx, msg, msg_size, delay) rt_msg_broadcast_timed(tbx, msg, msg_size, 0, delay)
#define rt_tbx_urgent(tbx, msg, msg_size) rt_msg_send(tbx, msg, msg_size, 0)
#define rt_tbx_urgent_if(tbx, msg, msg_size) rt_msg_send_if(tbx, msg, msg_size, 0)
#define rt_tbx_urgent_until(tbx, msg, msg_size, until) rt_msg_send_until(tbx, msg, msg_size, 0, until)
#define rt_tbx_urgent_timed(tbx, msg, msg_size, delay) rt_msg_send_timed(tbx, msg, msg_size, 0, delay)
#endif /* !_RTAI_RT_MSGQ_H */
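The compatibility macros at the end of the deleted header mapped the old typed-mailbox (tbx) calls onto the message-queue API. For reference, a user-space fragment written against that removed interface would have looked roughly like the sketch below; the queue name, sizes and payload struct are hypothetical, and only calls visible in the deleted header are used.

/* Hedged sketch against the interface removed above; names are made up. */
#include <rtai_msgq.h>   /* hypothetical header name; its guard is _RTAI_RT_MSGQ_H */

struct sample { long seq; double value; };

void producer(void)
{
	RT_MSGQ *q = rt_named_msgq_init("SMPLQ", 16, sizeof(struct sample));
	struct sample s = { 1, 3.14 };
	rt_msg_send(q, &s, sizeof(s), 1);        /* priority 1, blocking send */
	rt_named_msgq_delete(q);
}

void consumer(void)
{
	RT_MSGQ *q = rt_named_msgq_init("SMPLQ", 16, sizeof(struct sample));
	struct sample s;
	int prio;
	rt_msg_receive(q, &s, sizeof(s), &prio); /* blocks until a message arrives */
	rt_named_msgq_delete(q);
}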

@@ -36,7 +36,7 @@ int rt_unregister_tracer
(tracer_call /* The tracer function */);
int rt_trace_event
(uint8_t /* Event ID (as defined in this header file) */,
void* /* Structure describing the event */);
/* Generic macros */
#define RT_TRACE_EVENT(ID, DATA) rt_trace_event(ID, DATA)
@@ -44,33 +44,33 @@ int rt_trace_event
#define TRACE_RTAI_START TRACE_EV_MAX
/* Traced events */
#define TRACE_RTAI_EV_MOUNT TRACE_RTAI_START + 1 /* The RTAI subsystem was mounted */
#define TRACE_RTAI_EV_UMOUNT TRACE_RTAI_START + 2 /* The RTAI subsystem was unmounted */
#define TRACE_RTAI_EV_GLOBAL_IRQ_ENTRY TRACE_RTAI_START + 3 /* Entry in a global IRQ */
#define TRACE_RTAI_EV_GLOBAL_IRQ_EXIT TRACE_RTAI_START + 4 /* Exit from a global IRQ */
#define TRACE_RTAI_EV_OWN_IRQ_ENTRY TRACE_RTAI_START + 5 /* Entry in a CPU own IRQ */
#define TRACE_RTAI_EV_OWN_IRQ_EXIT TRACE_RTAI_START + 6 /* Exit from a CPU own IRQ */
#define TRACE_RTAI_EV_TRAP_ENTRY TRACE_RTAI_START + 7 /* Entry in a trap */
#define TRACE_RTAI_EV_TRAP_EXIT TRACE_RTAI_START + 8 /* Exit from a trap */
#define TRACE_RTAI_EV_SRQ_ENTRY TRACE_RTAI_START + 9 /* Entry in a SRQ */
#define TRACE_RTAI_EV_SRQ_EXIT TRACE_RTAI_START + 10 /* Exit from a SRQ */
#define TRACE_RTAI_EV_SWITCHTO_LINUX TRACE_RTAI_START + 11 /* Switch a CPU to Linux */
#define TRACE_RTAI_EV_SWITCHTO_RT TRACE_RTAI_START + 12 /* Switch a CPU to real-time */
#define TRACE_RTAI_EV_SCHED_CHANGE TRACE_RTAI_START + 13 /* A scheduling change has occured */
#define TRACE_RTAI_EV_TASK TRACE_RTAI_START + 14 /* Hit key part of task services */
#define TRACE_RTAI_EV_TIMER TRACE_RTAI_START + 15 /* Hit key part of timer services */
#define TRACE_RTAI_EV_SEM TRACE_RTAI_START + 16 /* Hit key part of semaphore services */
#define TRACE_RTAI_EV_MSG TRACE_RTAI_START + 17 /* Hit key part of message services */
#define TRACE_RTAI_EV_RPC TRACE_RTAI_START + 18 /* Hit key part of RPC services */
#define TRACE_RTAI_EV_MBX TRACE_RTAI_START + 19 /* Hit key part of mail box services */
#define TRACE_RTAI_EV_FIFO TRACE_RTAI_START + 20 /* Hit key part of FIFO services */
#define TRACE_RTAI_EV_SHM TRACE_RTAI_START + 21 /* Hit key part of shared memory services */
#define TRACE_RTAI_EV_POSIX TRACE_RTAI_START + 22 /* Hit key part of Posix services */
#define TRACE_RTAI_EV_LXRT TRACE_RTAI_START + 23 /* Hit key part of LXRT services */
#define TRACE_RTAI_EV_LXRTI TRACE_RTAI_START + 24 /* Hit key part of LXRT-Informed services */
/* Max number of traced events */
#define TRACE_RTAI_EV_MAX TRACE_RTAI_EV_LXRTI
/* Structures and macros for traced events */
/* TRACE_RTAI_MOUNT */
@@ -87,26 +87,26 @@ typedef struct _trace_rtai_global_irq_entry
} LTT_PACKED_STRUCT trace_rtai_global_irq_entry;
#if CONFIG_X86
#define TRACE_RTAI_GLOBAL_IRQ_ENTRY(ID, __dummy) \
do \
{\
uint32_t eflags, xcs; \
trace_rtai_global_irq_entry irq_entry;\
irq_entry.irq_id = ID;\
__asm__ __volatile__("pushfl; pop %0": "=g" (eflags)); \
__asm__ __volatile__("pushl %%cs; pop %0": "=g" (xcs)); \
irq_entry.kernel = !((VM_MASK & eflags) || (3 & xcs));\
rt_trace_event(TRACE_RTAI_EV_GLOBAL_IRQ_ENTRY, &irq_entry);\
} while(0)
#endif
#if CONFIG_PPC
#define TRACE_RTAI_GLOBAL_IRQ_ENTRY(ID, KERNEL) \
do \
{\
trace_rtai_global_irq_entry irq_entry;\
irq_entry.irq_id = ID;\
irq_entry.kernel = KERNEL;\
rt_trace_event(TRACE_RTAI_EV_GLOBAL_IRQ_ENTRY, &irq_entry);\
} while(0)
#endif
/* TRACE_RTAI_GLOBAL_IRQ_EXIT */
@@ -120,26 +120,26 @@ typedef struct _trace_rtai_own_irq_entry
} LTT_PACKED_STRUCT trace_rtai_own_irq_entry;
#if CONFIG_X86
#define TRACE_RTAI_OWN_IRQ_ENTRY(ID) \
do \
{\
uint32_t eflags, xcs; \
trace_rtai_own_irq_entry irq_entry;\
irq_entry.irq_id = ID;\
__asm__ __volatile__("pushfl; pop %0": "=g" (eflags)); \
__asm__ __volatile__("pushl %%cs; pop %0": "=g" (xcs)); \
irq_entry.kernel = !((VM_MASK & eflags) || (3 & xcs));\
rt_trace_event(TRACE_RTAI_EV_OWN_IRQ_ENTRY, &irq_entry);\
} while(0)
#endif
#if CONFIG_PPC
#define TRACE_RTAI_OWN_IRQ_ENTRY(ID, KERNEL) \
do \
{\
trace_rtai_own_irq_entry irq_entry;\
irq_entry.irq_id = ID;\
irq_entry.kernel = KERNEL;\
rt_trace_event(TRACE_RTAI_EV_OWN_IRQ_ENTRY, &irq_entry);\
} while(0)
#endif
/* TRACE_RTAI_OWN_IRQ_EXIT */
@@ -148,21 +148,21 @@ typedef struct _trace_rtai_own_irq_entry
/* TRACE_RTAI_TRAP_ENTRY */
typedef struct _trace_rtai_trap_entry
{
uint8_t trap_id; /* Trap number */
uint32_t address; /* Address where trap occured */
} LTT_PACKED_STRUCT trace_rtai_trap_entry;
#define TRACE_RTAI_TRAP_ENTRY(ID,ADDR) \
do \
{\
trace_rtai_trap_entry trap_event;\
trap_event.trap_id = ID;\
trap_event.address = ADDR; \
rt_trace_event(TRACE_RTAI_EV_TRAP_ENTRY, &trap_event);\
} while(0)
/*
uint32_t eip; \
__asm__ __volatile__("pushl %%ip; pop %0": "=g" (eip)); \
trap_event.address = eip;\
*/
/* TRACE_RTAI_TRAP_EXIT */
@@ -176,26 +176,26 @@ typedef struct _trace_rtai_srq_entry
} LTT_PACKED_STRUCT trace_rtai_srq_entry;
#if CONFIG_X86
#define TRACE_RTAI_SRQ_ENTRY(ID) \
do \
{\
uint32_t eflags, xcs; \
trace_rtai_srq_entry srq_entry;\
srq_entry.srq_id = ID;\
__asm__ __volatile__("pushfl; pop %0": "=g" (eflags)); \
__asm__ __volatile__("pushl %%cs; pop %0": "=g" (xcs)); \
srq_entry.kernel = !((VM_MASK & eflags) || (3 & xcs));\
rt_trace_event(TRACE_RTAI_EV_SRQ_ENTRY, &srq_entry);\
} while(0)
#endif
#if CONFIG_PPC || CONFIG_ARM
#define TRACE_RTAI_SRQ_ENTRY(ID,KERNEL) \
do \
{\
trace_rtai_srq_entry srq_entry;\
srq_entry.srq_id = ID;\
srq_entry.kernel = KERNEL;\
rt_trace_event(TRACE_RTAI_EV_SRQ_ENTRY, &srq_entry);\
} while(0)
#endif
/* TRACE_RTAI_SRQ_EXIT */
@@ -204,59 +204,59 @@ typedef struct _trace_rtai_srq_entry
/* TRACE_RTAI_SWITCHTO_LINUX */
typedef struct _trace_rtai_switchto_linux
{
uint8_t cpu_id; /* The CPUID being switched to Linux */
} LTT_PACKED_STRUCT trace_rtai_switchto_linux;
#define TRACE_RTAI_SWITCHTO_LINUX(ID) \
do \
{\
trace_rtai_switchto_linux switch_event; \
switch_event.cpu_id = (uint8_t) ID; \
rt_trace_event(TRACE_RTAI_EV_SWITCHTO_LINUX, &switch_event); \
} while(0)
/* TRACE_RTAI_SWITCHTO_RT */
typedef struct _trace_rtai_switchto_rt
{
uint8_t cpu_id; /* The CPUID being switched to RT */
} LTT_PACKED_STRUCT trace_rtai_switchto_rt;
#define TRACE_RTAI_SWITCHTO_RT(ID) \
do \
{\
trace_rtai_switchto_rt switch_event; \
switch_event.cpu_id = (uint8_t) ID; \
rt_trace_event(TRACE_RTAI_EV_SWITCHTO_RT, &switch_event); \
} while(0)
/* TRACE_RTAI_SCHED_CHANGE */
typedef struct _trace_rtai_sched_change
{
uint32_t out; /* Outgoing process */
uint32_t in; /* Incoming process */
uint32_t out_state; /* Outgoing process' state */
} LTT_PACKED_STRUCT trace_rtai_sched_change;
#define TRACE_RTAI_SCHED_CHANGE(OUT, IN, OUT_STATE) \
do \
{\
trace_rtai_sched_change sched_event;\
sched_event.out = (uint32_t) OUT;\
sched_event.in = (uint32_t) IN;\
sched_event.out_state = (uint32_t) OUT_STATE; \
rt_trace_event(TRACE_RTAI_EV_SCHED_CHANGE, &sched_event);\
} while(0)
/* TRACE_RTAI_TASK */
#define TRACE_RTAI_EV_TASK_INIT 1 /* Initialize task */
#define TRACE_RTAI_EV_TASK_DELETE 2 /* Delete task */
#define TRACE_RTAI_EV_TASK_SIG_HANDLER 3 /* Set signal handler */
#define TRACE_RTAI_EV_TASK_YIELD 4 /* Yield CPU control */
#define TRACE_RTAI_EV_TASK_SUSPEND 5 /* Suspend task */
#define TRACE_RTAI_EV_TASK_RESUME 6 /* Resume task */
#define TRACE_RTAI_EV_TASK_MAKE_PERIOD_RELATIVE 7 /* Make task periodic relative in nanoseconds */
#define TRACE_RTAI_EV_TASK_MAKE_PERIOD 8 /* Make task periodic */
#define TRACE_RTAI_EV_TASK_WAIT_PERIOD 9 /* Wait until the next period */
#define TRACE_RTAI_EV_TASK_BUSY_SLEEP 10 /* Busy sleep */
#define TRACE_RTAI_EV_TASK_SLEEP 11 /* Sleep */
#define TRACE_RTAI_EV_TASK_SLEEP_UNTIL 12 /* Sleep until */
typedef struct _trace_rtai_task
{
uint8_t event_sub_id; /* Task event ID */
@@ -265,22 +265,22 @@ typedef struct _trace_rtai_task
uint64_t event_data3; /* Event data 3 */
} LTT_PACKED_STRUCT trace_rtai_task;
#define TRACE_RTAI_TASK(ID, DATA1, DATA2, DATA3) \
do \
{\
trace_rtai_task task_event;\
task_event.event_sub_id = (uint8_t) ID;\
task_event.event_data1 = (uint32_t) DATA1; \
task_event.event_data2 = (uint64_t) DATA2; \
task_event.event_data3 = (uint64_t) DATA3; \
rt_trace_event(TRACE_RTAI_EV_TASK, &task_event);\
} while(0)
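Every event macro in this header follows the pattern just shown: fill a packed struct on the stack and hand it to rt_trace_event(). A hypothetical call site recording a task suspension would therefore reduce to:

/* Hedged sketch of emitting one trace point with the macro above;
 * "task" is assumed to be the RT_TASK being suspended. */
TRACE_RTAI_TASK(TRACE_RTAI_EV_TASK_SUSPEND,  /* sub-event id */
                task,                        /* event_data1: cast to uint32_t by the macro */
                0, 0);                       /* event_data2/3 unused here */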
/* TRACE_RTAI_TIMER */
#define TRACE_RTAI_EV_TIMER_REQUEST 1 /* Request timer */
#define TRACE_RTAI_EV_TIMER_FREE 2 /* Free timer */
#define TRACE_RTAI_EV_TIMER_REQUEST_APIC 3 /* Request APIC timers */
#define TRACE_RTAI_EV_TIMER_APIC_FREE 4 /* Free APIC timers */
#define TRACE_RTAI_EV_TIMER_HANDLE_EXPIRY 5 /* Handle timer expiry */
typedef struct _trace_rtai_timer
{
uint8_t event_sub_id; /* Timer event ID */
@@ -288,22 +288,22 @@ typedef struct _trace_rtai_timer
uint32_t event_data2; /* Event data 2 */
} LTT_PACKED_STRUCT trace_rtai_timer;
#define TRACE_RTAI_TIMER(ID, DATA1, DATA2) \
do \
{\
trace_rtai_timer timer_event; \
timer_event.event_sub_id = (uint8_t) ID; \
timer_event.event_data1 = (uint32_t) DATA1; \
timer_event.event_data2 = (uint32_t) DATA2; \
rt_trace_event(TRACE_RTAI_EV_TIMER, &timer_event); \
} while(0)
/* TRACE_RTAI_SEM */
#define TRACE_RTAI_EV_SEM_INIT 1 /* Initialize semaphore */
#define TRACE_RTAI_EV_SEM_DELETE 2 /* Delete semaphore */
#define TRACE_RTAI_EV_SEM_SIGNAL 3 /* Signal semaphore */
#define TRACE_RTAI_EV_SEM_WAIT 4 /* Wait on semaphore */
#define TRACE_RTAI_EV_SEM_WAIT_IF 5 /* Take semaphore if possible */
#define TRACE_RTAI_EV_SEM_WAIT_UNTIL 6 /* Wait on semaphore until a certain time */
typedef struct _trace_rtai_sem
{
uint8_t event_sub_id; /* Semaphore event ID */
@@ -311,22 +311,22 @@ typedef struct _trace_rtai_sem
uint64_t event_data2; /* Event data 2 */
} LTT_PACKED_STRUCT trace_rtai_sem;
#define TRACE_RTAI_SEM(ID, DATA1, DATA2) \
do \
{\
trace_rtai_sem sem_event; \
sem_event.event_sub_id = (uint8_t) ID; \
sem_event.event_data1 = (uint32_t) DATA1; \
sem_event.event_data2 = (uint64_t) DATA2; \
rt_trace_event(TRACE_RTAI_EV_SEM, &sem_event); \
} while(0)
/* TRACE_RTAI_MSG */
#define TRACE_RTAI_EV_MSG_SEND 1 /* Send a message */
#define TRACE_RTAI_EV_MSG_SEND_IF 2 /* Send if possible */
#define TRACE_RTAI_EV_MSG_SEND_UNTIL 3 /* Try sending until a certain time */
#define TRACE_RTAI_EV_MSG_RECV 4 /* Receive a message */
#define TRACE_RTAI_EV_MSG_RECV_IF 5 /* Receive if possible */
#define TRACE_RTAI_EV_MSG_RECV_UNTIL 6 /* Try receiving until a certain time */
typedef struct _trace_rtai_msg
{
uint8_t event_sub_id; /* Message event ID */
@@ -335,21 +335,21 @@ typedef struct _trace_rtai_msg
uint64_t event_data3; /* Event data 3 */
} LTT_PACKED_STRUCT trace_rtai_msg;
#define TRACE_RTAI_MSG(ID, DATA1, DATA2, DATA3) \
do \
{\
trace_rtai_msg msg_event; \
msg_event.event_sub_id = (uint8_t) ID; \
msg_event.event_data1 = (uint32_t) DATA1; \
msg_event.event_data2 = (uint32_t) DATA2; \
msg_event.event_data3 = (uint64_t) DATA3; \
rt_trace_event(TRACE_RTAI_EV_MSG, &msg_event); \
} while(0)
/* TRACE_RTAI_RPC */
#define TRACE_RTAI_EV_RPC_MAKE 1 /* Make a remote procedure call */
#define TRACE_RTAI_EV_RPC_MAKE_IF 2 /* Make RPC if receiver is ready */
#define TRACE_RTAI_EV_RPC_MAKE_UNTIL 3 /* Try making an RPC until a certain time */
#define TRACE_RTAI_EV_RPC_RETURN 4 /* Send result of RPC back to caller */
typedef struct _trace_rtai_rpc
{ {
uint8_t event_sub_id; /* RPC event ID */ uint8_t event_sub_id; /* RPC event ID */
@ -358,27 +358,27 @@ typedef struct _trace_rtai_rpc
uint64_t event_data3; /* Event data 3 */ uint64_t event_data3; /* Event data 3 */
} LTT_PACKED_STRUCT trace_rtai_rpc; } LTT_PACKED_STRUCT trace_rtai_rpc;
#define TRACE_RTAI_RPC(ID, DATA1, DATA2, DATA3) \ #define TRACE_RTAI_RPC(ID, DATA1, DATA2, DATA3) \
do \ do \
{\ {\
trace_rtai_rpc rpc_event; \ trace_rtai_rpc rpc_event; \
rpc_event.event_sub_id = (uint8_t) ID; \ rpc_event.event_sub_id = (uint8_t) ID; \
rpc_event.event_data1 = (uint32_t) DATA1; \ rpc_event.event_data1 = (uint32_t) DATA1; \
rpc_event.event_data2 = (uint32_t) DATA2; \ rpc_event.event_data2 = (uint32_t) DATA2; \
rpc_event.event_data3 = (uint64_t) DATA3; \ rpc_event.event_data3 = (uint64_t) DATA3; \
rt_trace_event(TRACE_RTAI_EV_RPC, &rpc_event); \ rt_trace_event(TRACE_RTAI_EV_RPC, &rpc_event); \
} while(0) } while(0)
/* TRACE_RTAI_MBX */ /* TRACE_RTAI_MBX */
#define TRACE_RTAI_EV_MBX_INIT 1 /* Initialize Message BoX */ #define TRACE_RTAI_EV_MBX_INIT 1 /* Initialize Message BoX */
#define TRACE_RTAI_EV_MBX_DELETE 2 /* Delete message box */ #define TRACE_RTAI_EV_MBX_DELETE 2 /* Delete message box */
#define TRACE_RTAI_EV_MBX_SEND 3 /* Send a message to a message box */ #define TRACE_RTAI_EV_MBX_SEND 3 /* Send a message to a message box */
#define TRACE_RTAI_EV_MBX_SEND_WP 4 /* Send as many bytes as possible */ #define TRACE_RTAI_EV_MBX_SEND_WP 4 /* Send as many bytes as possible */
#define TRACE_RTAI_EV_MBX_SEND_IF 5 /* Send a message if possible */ #define TRACE_RTAI_EV_MBX_SEND_IF 5 /* Send a message if possible */
#define TRACE_RTAI_EV_MBX_SEND_UNTIL 6 /* Try sending until a certain time */ #define TRACE_RTAI_EV_MBX_SEND_UNTIL 6 /* Try sending until a certain time */
#define TRACE_RTAI_EV_MBX_RECV 7 /* Receive a message */ #define TRACE_RTAI_EV_MBX_RECV 7 /* Receive a message */
#define TRACE_RTAI_EV_MBX_RECV_WP 8 /* Receive as many bytes as possible */ #define TRACE_RTAI_EV_MBX_RECV_WP 8 /* Receive as many bytes as possible */
#define TRACE_RTAI_EV_MBX_RECV_IF 9 /* Receive a message if available */ #define TRACE_RTAI_EV_MBX_RECV_IF 9 /* Receive a message if available */
#define TRACE_RTAI_EV_MBX_RECV_UNTIL 10 /* Try receiving until a certain time */ #define TRACE_RTAI_EV_MBX_RECV_UNTIL 10 /* Try receiving until a certain time */
typedef struct _trace_rtai_mbx typedef struct _trace_rtai_mbx
{ {
uint8_t event_sub_id; /* Message Box event ID */ uint8_t event_sub_id; /* Message Box event ID */
@ -387,43 +387,43 @@ typedef struct _trace_rtai_mbx
uint64_t event_data3; /* Event data 3 */ uint64_t event_data3; /* Event data 3 */
} LTT_PACKED_STRUCT trace_rtai_mbx; } LTT_PACKED_STRUCT trace_rtai_mbx;
#define TRACE_RTAI_MBX(ID, DATA1, DATA2, DATA3) \ #define TRACE_RTAI_MBX(ID, DATA1, DATA2, DATA3) \
do \ do \
{\ {\
trace_rtai_mbx mbx_event; \ trace_rtai_mbx mbx_event; \
mbx_event.event_sub_id = (uint8_t) ID; \ mbx_event.event_sub_id = (uint8_t) ID; \
mbx_event.event_data1 = (uint32_t) DATA1; \ mbx_event.event_data1 = (uint32_t) DATA1; \
mbx_event.event_data2 = (uint32_t) DATA2; \ mbx_event.event_data2 = (uint32_t) DATA2; \
mbx_event.event_data3 = (uint64_t) DATA3; \ mbx_event.event_data3 = (uint64_t) DATA3; \
rt_trace_event(TRACE_RTAI_EV_MBX, &mbx_event); \ rt_trace_event(TRACE_RTAI_EV_MBX, &mbx_event); \
} while(0) } while(0)
/* TRACE_RTAI_FIFO */ /* TRACE_RTAI_FIFO */
#define TRACE_RTAI_EV_FIFO_CREATE 1 /* Create FIFO */ #define TRACE_RTAI_EV_FIFO_CREATE 1 /* Create FIFO */
#define TRACE_RTAI_EV_FIFO_DESTROY 2 /* Destroy FIFO */ #define TRACE_RTAI_EV_FIFO_DESTROY 2 /* Destroy FIFO */
#define TRACE_RTAI_EV_FIFO_RESET 3 /* Reset FIFO */ #define TRACE_RTAI_EV_FIFO_RESET 3 /* Reset FIFO */
#define TRACE_RTAI_EV_FIFO_RESIZE 4 /* Resize FIFO */ #define TRACE_RTAI_EV_FIFO_RESIZE 4 /* Resize FIFO */
#define TRACE_RTAI_EV_FIFO_PUT 5 /* Write data to FIFO */ #define TRACE_RTAI_EV_FIFO_PUT 5 /* Write data to FIFO */
#define TRACE_RTAI_EV_FIFO_GET 6 /* Get data from FIFO */ #define TRACE_RTAI_EV_FIFO_GET 6 /* Get data from FIFO */
#define TRACE_RTAI_EV_FIFO_CREATE_HANDLER 7 /* Install FIFO handler */ #define TRACE_RTAI_EV_FIFO_CREATE_HANDLER 7 /* Install FIFO handler */
#define TRACE_RTAI_EV_FIFO_OPEN 8 /* Open FIFO */ #define TRACE_RTAI_EV_FIFO_OPEN 8 /* Open FIFO */
#define TRACE_RTAI_EV_FIFO_RELEASE 9 /* Release FIFO */ #define TRACE_RTAI_EV_FIFO_RELEASE 9 /* Release FIFO */
#define TRACE_RTAI_EV_FIFO_READ 10 /* Read from FIFO */ #define TRACE_RTAI_EV_FIFO_READ 10 /* Read from FIFO */
#define TRACE_RTAI_EV_FIFO_WRITE 11 /* Write to FIFO */ #define TRACE_RTAI_EV_FIFO_WRITE 11 /* Write to FIFO */
#define TRACE_RTAI_EV_FIFO_READ_TIMED 12 /* Read with time limit */ #define TRACE_RTAI_EV_FIFO_READ_TIMED 12 /* Read with time limit */
#define TRACE_RTAI_EV_FIFO_WRITE_TIMED 13 /* Write with time limit */ #define TRACE_RTAI_EV_FIFO_WRITE_TIMED 13 /* Write with time limit */
#define TRACE_RTAI_EV_FIFO_READ_ALLATONCE 14 /* Read all the data from FIFO */ #define TRACE_RTAI_EV_FIFO_READ_ALLATONCE 14 /* Read all the data from FIFO */
#define TRACE_RTAI_EV_FIFO_LLSEEK 15 /* Seek position into FIFO */ #define TRACE_RTAI_EV_FIFO_LLSEEK 15 /* Seek position into FIFO */
#define TRACE_RTAI_EV_FIFO_FASYNC 16 /* Asynchronous notification */ #define TRACE_RTAI_EV_FIFO_FASYNC 16 /* Asynchronous notification */
#define TRACE_RTAI_EV_FIFO_IOCTL 17 /* IO control on FIFO */ #define TRACE_RTAI_EV_FIFO_IOCTL 17 /* IO control on FIFO */
#define TRACE_RTAI_EV_FIFO_POLL 18 /* Poll FIFO */ #define TRACE_RTAI_EV_FIFO_POLL 18 /* Poll FIFO */
#define TRACE_RTAI_EV_FIFO_SUSPEND_TIMED 19 /* Suspend task for given period */ #define TRACE_RTAI_EV_FIFO_SUSPEND_TIMED 19 /* Suspend task for given period */
#define TRACE_RTAI_EV_FIFO_SET_ASYNC_SIG 20 /* Set asynchrounous signal */ #define TRACE_RTAI_EV_FIFO_SET_ASYNC_SIG 20 /* Set asynchrounous signal */
#define TRACE_RTAI_EV_FIFO_SEM_INIT 21 /* Initialize semaphore */ #define TRACE_RTAI_EV_FIFO_SEM_INIT 21 /* Initialize semaphore */
#define TRACE_RTAI_EV_FIFO_SEM_POST 22 /* Post semaphore */ #define TRACE_RTAI_EV_FIFO_SEM_POST 22 /* Post semaphore */
#define TRACE_RTAI_EV_FIFO_SEM_WAIT 23 /* Wait on semaphore */ #define TRACE_RTAI_EV_FIFO_SEM_WAIT 23 /* Wait on semaphore */
#define TRACE_RTAI_EV_FIFO_SEM_TRY_WAIT 24 /* Try waiting on semaphore */ #define TRACE_RTAI_EV_FIFO_SEM_TRY_WAIT 24 /* Try waiting on semaphore */
#define TRACE_RTAI_EV_FIFO_SEM_TIMED_WAIT 25 /* Wait on semaphore until a certain time */ #define TRACE_RTAI_EV_FIFO_SEM_TIMED_WAIT 25 /* Wait on semaphore until a certain time */
#define TRACE_RTAI_EV_FIFO_SEM_DESTROY 26 /* Destroy semaphore */ #define TRACE_RTAI_EV_FIFO_SEM_DESTROY 26 /* Destroy semaphore */
typedef struct _trace_rtai_fifo typedef struct _trace_rtai_fifo
{ {
uint8_t event_sub_id; /* FIFO event ID */ uint8_t event_sub_id; /* FIFO event ID */
@ -431,21 +431,21 @@ typedef struct _trace_rtai_fifo
uint32_t event_data2; /* Event data 2 */ uint32_t event_data2; /* Event data 2 */
} LTT_PACKED_STRUCT trace_rtai_fifo; } LTT_PACKED_STRUCT trace_rtai_fifo;
#define TRACE_RTAI_FIFO(ID, DATA1, DATA2) \ #define TRACE_RTAI_FIFO(ID, DATA1, DATA2) \
do \ do \
{\ {\
trace_rtai_fifo fifo_event; \ trace_rtai_fifo fifo_event; \
fifo_event.event_sub_id = (uint8_t) ID; \ fifo_event.event_sub_id = (uint8_t) ID; \
fifo_event.event_data1 = (uint32_t) DATA1; \ fifo_event.event_data1 = (uint32_t) DATA1; \
fifo_event.event_data2 = (uint32_t) DATA2; \ fifo_event.event_data2 = (uint32_t) DATA2; \
rt_trace_event(TRACE_RTAI_EV_FIFO, &fifo_event); \ rt_trace_event(TRACE_RTAI_EV_FIFO, &fifo_event); \
} while(0) } while(0)
/* TRACE_RTAI_SHM */ /* TRACE_RTAI_SHM */
#define TRACE_RTAI_EV_SHM_MALLOC 1 /* Allocate shared memory */ #define TRACE_RTAI_EV_SHM_MALLOC 1 /* Allocate shared memory */
#define TRACE_RTAI_EV_SHM_KMALLOC 2 /* Allocate shared memory in kernel space */ #define TRACE_RTAI_EV_SHM_KMALLOC 2 /* Allocate shared memory in kernel space */
#define TRACE_RTAI_EV_SHM_GET_SIZE 3 /* Get the size of the shared memory area */ #define TRACE_RTAI_EV_SHM_GET_SIZE 3 /* Get the size of the shared memory area */
#define TRACE_RTAI_EV_SHM_FREE 4 /* Free shared memory */ #define TRACE_RTAI_EV_SHM_FREE 4 /* Free shared memory */
#define TRACE_RTAI_EV_SHM_KFREE 5 /* Free kernel space shared memory */ #define TRACE_RTAI_EV_SHM_KFREE 5 /* Free kernel space shared memory */
typedef struct _trace_rtai_shm typedef struct _trace_rtai_shm
{ {
uint8_t event_sub_id; /* SHared Memory event ID */ uint8_t event_sub_id; /* SHared Memory event ID */
@ -454,30 +454,30 @@ typedef struct _trace_rtai_shm
uint32_t event_data3; /* Event data 3 */ uint32_t event_data3; /* Event data 3 */
} LTT_PACKED_STRUCT trace_rtai_shm; } LTT_PACKED_STRUCT trace_rtai_shm;
#define TRACE_RTAI_SHM(ID, DATA1, DATA2, DATA3) \ #define TRACE_RTAI_SHM(ID, DATA1, DATA2, DATA3) \
do \ do \
{\ {\
trace_rtai_shm shm_event; \ trace_rtai_shm shm_event; \
shm_event.event_sub_id = (uint8_t) ID; \ shm_event.event_sub_id = (uint8_t) ID; \
shm_event.event_data1 = (uint32_t) DATA1; \ shm_event.event_data1 = (uint32_t) DATA1; \
shm_event.event_data2 = (uint32_t) DATA2; \ shm_event.event_data2 = (uint32_t) DATA2; \
shm_event.event_data3 = (uint32_t) DATA3; \ shm_event.event_data3 = (uint32_t) DATA3; \
rt_trace_event(TRACE_RTAI_EV_SHM, &shm_event); \ rt_trace_event(TRACE_RTAI_EV_SHM, &shm_event); \
} while(0) } while(0)
/* TRACE_RTAI_POSIX */ /* TRACE_RTAI_POSIX */
#define TRACE_RTAI_EV_POSIX_MQ_OPEN 1 /* Open/create message queue */ #define TRACE_RTAI_EV_POSIX_MQ_OPEN 1 /* Open/create message queue */
#define TRACE_RTAI_EV_POSIX_MQ_CLOSE 2 /* Close message queue */ #define TRACE_RTAI_EV_POSIX_MQ_CLOSE 2 /* Close message queue */
#define TRACE_RTAI_EV_POSIX_MQ_SEND 3 /* Send message to queue */ #define TRACE_RTAI_EV_POSIX_MQ_SEND 3 /* Send message to queue */
#define TRACE_RTAI_EV_POSIX_MQ_RECV 4 /* Receive message from queue */ #define TRACE_RTAI_EV_POSIX_MQ_RECV 4 /* Receive message from queue */
#define TRACE_RTAI_EV_POSIX_MQ_GET_ATTR 5 /* Get message queue attributes */ #define TRACE_RTAI_EV_POSIX_MQ_GET_ATTR 5 /* Get message queue attributes */
#define TRACE_RTAI_EV_POSIX_MQ_SET_ATTR 6 /* Set message queue attributes */ #define TRACE_RTAI_EV_POSIX_MQ_SET_ATTR 6 /* Set message queue attributes */
#define TRACE_RTAI_EV_POSIX_MQ_NOTIFY 7 /* Register to be notified of message arrival */ #define TRACE_RTAI_EV_POSIX_MQ_NOTIFY 7 /* Register to be notified of message arrival */
#define TRACE_RTAI_EV_POSIX_MQ_UNLINK 8 /* Destroy message queue */ #define TRACE_RTAI_EV_POSIX_MQ_UNLINK 8 /* Destroy message queue */
#define TRACE_RTAI_EV_POSIX_PTHREAD_CREATE 9 /* Create RT task */ #define TRACE_RTAI_EV_POSIX_PTHREAD_CREATE 9 /* Create RT task */
#define TRACE_RTAI_EV_POSIX_PTHREAD_EXIT 10 /* Terminate calling thread */ #define TRACE_RTAI_EV_POSIX_PTHREAD_EXIT 10 /* Terminate calling thread */
#define TRACE_RTAI_EV_POSIX_PTHREAD_SELF 11 /* Get thread ID */ #define TRACE_RTAI_EV_POSIX_PTHREAD_SELF 11 /* Get thread ID */
#define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_INIT 12 /* Initialize thread attribute */ #define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_INIT 12 /* Initialize thread attribute */
#define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_DESTROY 13 /* Destroy thread attribute */ #define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_DESTROY 13 /* Destroy thread attribute */
#define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_SETDETACHSTATE 14 /* Set detach state of thread */ #define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_SETDETACHSTATE 14 /* Set detach state of thread */
#define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_GETDETACHSTATE 15 /* Get detach state of thread */ #define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_GETDETACHSTATE 15 /* Get detach state of thread */
#define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_SETSCHEDPARAM 16 /* Set thread scheduling parameters */ #define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_SETSCHEDPARAM 16 /* Set thread scheduling parameters */
@ -486,28 +486,28 @@ typedef struct _trace_rtai_shm
#define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_GETSCHEDPOLICY 19 /* Get thread scheduling policy */ #define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_GETSCHEDPOLICY 19 /* Get thread scheduling policy */
#define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_SETINHERITSCHED 20 /* Set thread scheduling inheritance */ #define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_SETINHERITSCHED 20 /* Set thread scheduling inheritance */
#define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_GETINHERITSCHED 21 /* Get thread scheduling inheritance */ #define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_GETINHERITSCHED 21 /* Get thread scheduling inheritance */
#define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_SETSCOPE 22 /* Set thread scheduling scope */ #define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_SETSCOPE 22 /* Set thread scheduling scope */
#define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_GETSCOPE 23 /* Get thread scheduling scope */ #define TRACE_RTAI_EV_POSIX_PTHREAD_ATTR_GETSCOPE 23 /* Get thread scheduling scope */
#define TRACE_RTAI_EV_POSIX_PTHREAD_SCHED_YIELD 24 /* Yield processor control */ #define TRACE_RTAI_EV_POSIX_PTHREAD_SCHED_YIELD 24 /* Yield processor control */
#define TRACE_RTAI_EV_POSIX_PTHREAD_CLOCK_GETTIME 25 /* Get current clock count */ #define TRACE_RTAI_EV_POSIX_PTHREAD_CLOCK_GETTIME 25 /* Get current clock count */
#define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEX_INIT 26 /* Initialize mutex */ #define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEX_INIT 26 /* Initialize mutex */
#define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEX_DESTROY 27 /* Destroy mutex */ #define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEX_DESTROY 27 /* Destroy mutex */
#define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEXATTR_INIT 28 /* Initiatize mutex attribute */ #define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEXATTR_INIT 28 /* Initiatize mutex attribute */
#define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEXATTR_DESTROY 29 /* Destroy mutex attribute */ #define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEXATTR_DESTROY 29 /* Destroy mutex attribute */
#define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEXATTR_SETKIND_NP 30 /* Set kind of attribute */ #define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEXATTR_SETKIND_NP 30 /* Set kind of attribute */
#define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEXATTR_GETKIND_NP 31 /* Get kind of attribute */ #define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEXATTR_GETKIND_NP 31 /* Get kind of attribute */
#define TRACE_RTAI_EV_POSIX_PTHREAD_SETSCHEDPARAM 32 /* Set scheduling parameters */ #define TRACE_RTAI_EV_POSIX_PTHREAD_SETSCHEDPARAM 32 /* Set scheduling parameters */
#define TRACE_RTAI_EV_POSIX_PTHREAD_GETSCHEDPARAM 33 /* Get scheduling parameters */ #define TRACE_RTAI_EV_POSIX_PTHREAD_GETSCHEDPARAM 33 /* Get scheduling parameters */
#define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEX_TRY_LOCK 34 /* Non-blocking mutex lock */ #define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEX_TRY_LOCK 34 /* Non-blocking mutex lock */
#define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEX_LOCK 35 /* Blocking mutex lock */ #define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEX_LOCK 35 /* Blocking mutex lock */
#define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEX_UNLOCK 36 /* Mutex unlock */ #define TRACE_RTAI_EV_POSIX_PTHREAD_MUTEX_UNLOCK 36 /* Mutex unlock */
#define TRACE_RTAI_EV_POSIX_PTHREAD_COND_INIT 37 /* Initialize conditionnal variable */ #define TRACE_RTAI_EV_POSIX_PTHREAD_COND_INIT 37 /* Initialize conditionnal variable */
#define TRACE_RTAI_EV_POSIX_PTHREAD_COND_DESTROY 38 /* Destroy cond. variable */ #define TRACE_RTAI_EV_POSIX_PTHREAD_COND_DESTROY 38 /* Destroy cond. variable */
#define TRACE_RTAI_EV_POSIX_PTHREAD_CONDATTR_INIT 39 /* Initialize cond. attribute variable */ #define TRACE_RTAI_EV_POSIX_PTHREAD_CONDATTR_INIT 39 /* Initialize cond. attribute variable */
#define TRACE_RTAI_EV_POSIX_PTHREAD_CONDATTR_DESTROY 40 /* Destroy cond. attribute variable */ #define TRACE_RTAI_EV_POSIX_PTHREAD_CONDATTR_DESTROY 40 /* Destroy cond. attribute variable */
#define TRACE_RTAI_EV_POSIX_PTHREAD_COND_WAIT 41 /* Wait for cond. variable to be signaled */ #define TRACE_RTAI_EV_POSIX_PTHREAD_COND_WAIT 41 /* Wait for cond. variable to be signaled */
#define TRACE_RTAI_EV_POSIX_PTHREAD_COND_TIMEDWAIT 42 /* Wait for a certain time */ #define TRACE_RTAI_EV_POSIX_PTHREAD_COND_TIMEDWAIT 42 /* Wait for a certain time */
#define TRACE_RTAI_EV_POSIX_PTHREAD_COND_SIGNAL 43 /* Signal a waiting thread */ #define TRACE_RTAI_EV_POSIX_PTHREAD_COND_SIGNAL 43 /* Signal a waiting thread */
#define TRACE_RTAI_EV_POSIX_PTHREAD_COND_BROADCAST 44 /* Signal all waiting threads */ #define TRACE_RTAI_EV_POSIX_PTHREAD_COND_BROADCAST 44 /* Signal all waiting threads */
typedef struct _trace_rtai_posix typedef struct _trace_rtai_posix
{ {
@ -517,25 +517,25 @@ typedef struct _trace_rtai_posix
uint32_t event_data3; /* Event data 3 */ uint32_t event_data3; /* Event data 3 */
} LTT_PACKED_STRUCT trace_rtai_posix; } LTT_PACKED_STRUCT trace_rtai_posix;
#define TRACE_RTAI_POSIX(ID, DATA1, DATA2, DATA3) \ #define TRACE_RTAI_POSIX(ID, DATA1, DATA2, DATA3) \
do \ do \
{\ {\
trace_rtai_posix posix_event; \ trace_rtai_posix posix_event; \
posix_event.event_sub_id = (uint8_t) ID; \ posix_event.event_sub_id = (uint8_t) ID; \
posix_event.event_data1 = (uint32_t) DATA1; \ posix_event.event_data1 = (uint32_t) DATA1; \
posix_event.event_data2 = (uint32_t) DATA2; \ posix_event.event_data2 = (uint32_t) DATA2; \
posix_event.event_data3 = (uint32_t) DATA3; \ posix_event.event_data3 = (uint32_t) DATA3; \
rt_trace_event(TRACE_RTAI_EV_POSIX, &posix_event); \ rt_trace_event(TRACE_RTAI_EV_POSIX, &posix_event); \
} while(0) } while(0)
/* TRACE_RTAI_LXRT */ /* TRACE_RTAI_LXRT */
#define TRACE_RTAI_EV_LXRT_RTAI_SYSCALL_ENTRY 1 /* Entry in LXRT syscall */ #define TRACE_RTAI_EV_LXRT_RTAI_SYSCALL_ENTRY 1 /* Entry in LXRT syscall */
#define TRACE_RTAI_EV_LXRT_RTAI_SYSCALL_EXIT 2 /* Exit from LXRT syscall */ #define TRACE_RTAI_EV_LXRT_RTAI_SYSCALL_EXIT 2 /* Exit from LXRT syscall */
#define TRACE_RTAI_EV_LXCHANGE 3 /* Scheduling change */ #define TRACE_RTAI_EV_LXCHANGE 3 /* Scheduling change */
#define TRACE_RTAI_EV_LXRT_STEAL_TASK 4 /* Take task control from Linux */ #define TRACE_RTAI_EV_LXRT_STEAL_TASK 4 /* Take task control from Linux */
#define TRACE_RTAI_EV_LXRT_GIVE_BACK_TASK 5 /* Give task control back to Linux */ #define TRACE_RTAI_EV_LXRT_GIVE_BACK_TASK 5 /* Give task control back to Linux */
#define TRACE_RTAI_EV_LXRT_SUSPEND 6 /* Suspend a task */ #define TRACE_RTAI_EV_LXRT_SUSPEND 6 /* Suspend a task */
#define TRACE_RTAI_EV_LXRT_RESUME 7 /* Resume task's execution */ #define TRACE_RTAI_EV_LXRT_RESUME 7 /* Resume task's execution */
#define TRACE_RTAI_EV_LXRT_HANDLE 8 /* Handle a request for an RTAI service */ #define TRACE_RTAI_EV_LXRT_HANDLE 8 /* Handle a request for an RTAI service */
typedef struct _trace_rtai_lxrt typedef struct _trace_rtai_lxrt
{ {
uint8_t event_sub_id; /* LXRT event ID */ uint8_t event_sub_id; /* LXRT event ID */
@ -544,27 +544,27 @@ typedef struct _trace_rtai_lxrt
uint32_t event_data3; /* Event data 3 */ uint32_t event_data3; /* Event data 3 */
} LTT_PACKED_STRUCT trace_rtai_lxrt; } LTT_PACKED_STRUCT trace_rtai_lxrt;
#define TRACE_RTAI_LXRT(ID, DATA1, DATA2, DATA3) \ #define TRACE_RTAI_LXRT(ID, DATA1, DATA2, DATA3) \
do \ do \
{\ {\
trace_rtai_lxrt lxrt_event; \ trace_rtai_lxrt lxrt_event; \
lxrt_event.event_sub_id = (uint8_t) ID; \ lxrt_event.event_sub_id = (uint8_t) ID; \
lxrt_event.event_data1 = (uint32_t) DATA1; \ lxrt_event.event_data1 = (uint32_t) DATA1; \
lxrt_event.event_data2 = (uint32_t) DATA2; \ lxrt_event.event_data2 = (uint32_t) DATA2; \
lxrt_event.event_data3 = (uint32_t) DATA3; \ lxrt_event.event_data3 = (uint32_t) DATA3; \
rt_trace_event(TRACE_RTAI_EV_LXRT, &lxrt_event); \ rt_trace_event(TRACE_RTAI_EV_LXRT, &lxrt_event); \
} while(0) } while(0)
/* TRACE_RTAI_LXRTI */ /* TRACE_RTAI_LXRTI */
#define TRACE_RTAI_EV_LXRTI_NAME_ATTACH 1 /* Register current process as name */ #define TRACE_RTAI_EV_LXRTI_NAME_ATTACH 1 /* Register current process as name */
#define TRACE_RTAI_EV_LXRTI_NAME_LOCATE 2 /* Locate a given process usint it's name */ #define TRACE_RTAI_EV_LXRTI_NAME_LOCATE 2 /* Locate a given process usint it's name */
#define TRACE_RTAI_EV_LXRTI_NAME_DETACH 3 /* Detach process from name */ #define TRACE_RTAI_EV_LXRTI_NAME_DETACH 3 /* Detach process from name */
#define TRACE_RTAI_EV_LXRTI_SEND 4 /* Send message to PID */ #define TRACE_RTAI_EV_LXRTI_SEND 4 /* Send message to PID */
#define TRACE_RTAI_EV_LXRTI_RECV 5 /* Receive message */ #define TRACE_RTAI_EV_LXRTI_RECV 5 /* Receive message */
#define TRACE_RTAI_EV_LXRTI_CRECV 6 /* Non-blocking receive */ #define TRACE_RTAI_EV_LXRTI_CRECV 6 /* Non-blocking receive */
#define TRACE_RTAI_EV_LXRTI_REPLY 7 /* Reply to message received */ #define TRACE_RTAI_EV_LXRTI_REPLY 7 /* Reply to message received */
#define TRACE_RTAI_EV_LXRTI_PROXY_ATTACH 8 /* Attach proxy to process */ #define TRACE_RTAI_EV_LXRTI_PROXY_ATTACH 8 /* Attach proxy to process */
#define TRACE_RTAI_EV_LXRTI_PROXY_DETACH 9 /* Detach proxy from process */ #define TRACE_RTAI_EV_LXRTI_PROXY_DETACH 9 /* Detach proxy from process */
#define TRACE_RTAI_EV_LXRTI_TRIGGER 10 /* Trigger proxy */ #define TRACE_RTAI_EV_LXRTI_TRIGGER 10 /* Trigger proxy */
typedef struct _trace_rtai_lxrti typedef struct _trace_rtai_lxrti
{ {
uint8_t event_sub_id; /* LXRT event ID */ uint8_t event_sub_id; /* LXRT event ID */
@ -573,15 +573,15 @@ typedef struct _trace_rtai_lxrti
uint64_t event_data3; /* Event data 3 */ uint64_t event_data3; /* Event data 3 */
} LTT_PACKED_STRUCT trace_rtai_lxrti; } LTT_PACKED_STRUCT trace_rtai_lxrti;
#define TRACE_RTAI_LXRTI(ID, DATA1, DATA2, DATA3) \ #define TRACE_RTAI_LXRTI(ID, DATA1, DATA2, DATA3) \
do \ do \
{\ {\
trace_rtai_lxrti lxrti_event; \ trace_rtai_lxrti lxrti_event; \
lxrti_event.event_sub_id = (uint8_t) ID; \ lxrti_event.event_sub_id = (uint8_t) ID; \
lxrti_event.event_data1 = (uint32_t) DATA1; \ lxrti_event.event_data1 = (uint32_t) DATA1; \
lxrti_event.event_data2 = (uint32_t) DATA2; \ lxrti_event.event_data2 = (uint32_t) DATA2; \
lxrti_event.event_data3 = (uint64_t) DATA3; \ lxrti_event.event_data3 = (uint64_t) DATA3; \
rt_trace_event(TRACE_RTAI_EV_LXRTI, &lxrti_event); \ rt_trace_event(TRACE_RTAI_EV_LXRTI, &lxrti_event); \
} while(0) } while(0)
#else /* !(CONFIG_RTAI_TRACE && __KERNEL__) */ #else /* !(CONFIG_RTAI_TRACE && __KERNEL__) */
#define RT_TRACE_EVENT(ID, DATA) #define RT_TRACE_EVENT(ID, DATA)

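For orientation, a minimal sketch of how one of these LTT instrumentation macros is meant to be invoked from RTAI kernel code. The wrapper below is hypothetical (it is not part of this diff) and assumes RTAI's SEM type exposes a count field; it only relies on the TRACE_RTAI_SEM macro and the TRACE_RTAI_EV_SEM_* sub-IDs defined above:

/* Hypothetical instrumentation point: record a "signal semaphore" event
 * carrying the semaphore address and its current count. */
static inline void trace_sem_signal(SEM *sem)
{
	TRACE_RTAI_SEM(TRACE_RTAI_EV_SEM_SIGNAL,  /* event_sub_id */
		       (unsigned long) sem,       /* event_data1: object address */
		       sem->count);               /* event_data2: current count */
}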
View file

@@ -38,7 +38,7 @@
#ifndef DECLARE_MUTEX_LOCKED
#ifndef __DECLARE_SEMAPHORE_GENERIC
#define DECLARE_MUTEX_LOCKED(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 0)
#else
#define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
#endif
@@ -69,7 +69,7 @@ typedef int timer_t;
#ifndef __deprecated
#define container_of(ptr, type, member) \
({ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)( (char *)__mptr - offsetof(type,member) );})
#endif
#ifndef __deprecated
@@ -77,7 +77,7 @@ typedef int timer_t;
#endif
#define RTAI_MODULE_PARM_ARRAY(name, type, addr, size) \
static inline void *__check_existence_##name(void) { return &name; } \
MODULE_PARM(name, "1-" __MODULE_STRING(size) _MODULE_PARM_STRING_ ## type);
#define _MODULE_PARM_STRING_charp "s"
@@ -126,7 +126,7 @@ do { \
static inline unsigned long hweight_long(unsigned long w)
{
return sizeof(w) == 4 ? hweight32(w) : hweight32(w);
}
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) */

View file

@@ -50,16 +50,16 @@
#define RTDM_INDX 15
#define __rtdm_fdcount 0
#define __rtdm_open 1
#define __rtdm_socket 2
#define __rtdm_close 3
#define __rtdm_ioctl 4
#define __rtdm_read 5
#define __rtdm_write 6
#define __rtdm_recvmsg 7
#define __rtdm_sendmsg 8
#define __rtdm_select 9
#ifdef __KERNEL__
@@ -132,7 +132,7 @@ typedef int64_t nanosecs_rel_t;
#define RTDM_CLASS_NETWORK 4
#define RTDM_CLASS_RTMAC 5
#define RTDM_CLASS_TESTING 6
#define RTDM_CLASS_RTIPC 7
/*
#define RTDM_CLASS_USB ?
#define RTDM_CLASS_FIREWIRE ?
@@ -323,17 +323,7 @@ static inline ssize_t rt_dev_recvfrom(int fd, void *buf, size_t len, int flags,
static inline int rt_dev_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, nanosecs_rel_t timeout)
{
#ifdef CONFIG_RTAI_RTDM_SELECT
struct xnselector *selector;
int ret;
selector = rt_malloc(sizeof(struct xnselector));
xnselector_init(selector);
ret = __rt_dev_select(nfds, rfds, wfds, efds, timeout, selector, 1);
xnselector_destroy(selector);
return ret;
#else
return -ENOSYS;
#endif
}
#else /* !__KERNEL__ */
@@ -364,26 +354,26 @@ static inline int RTDM_RTAI_LXRT(int ext, int lsize, int srq, void *arg)
static inline int rt_dev_fdcount(void)
{
struct { long dummy; } arg = { 0 };
return RTDM_RTAI_LXRT(RTDM_INDX, SIZARG, __rtdm_fdcount, &arg);
}
static inline int rt_dev_open(const char *path, int oflag, ...)
{
struct { const char *path; long oflag; } arg = { path, oflag };
return RTDM_RTAI_LXRT(RTDM_INDX, SIZARG, __rtdm_open, &arg);
}
static inline int rt_dev_socket(int protocol_family, int socket_type, int protocol)
{
struct { long protocol_family; long socket_type; long protocol; } arg = { protocol_family, socket_type, protocol };
return RTDM_RTAI_LXRT(RTDM_INDX, SIZARG, __rtdm_socket, &arg);
}
static inline int rt_dev_close(int fd)
{
struct { long fd; } arg = { fd };
return RTDM_RTAI_LXRT(RTDM_INDX, SIZARG, __rtdm_close, &arg);
}
static inline int rt_dev_ioctl(int fd, int request, ...)
@@ -398,20 +388,20 @@ static inline int rt_dev_ioctl(int fd, int request, ...)
static inline ssize_t rt_dev_read(int fd, void *buf, size_t nbytes)
{
struct { long fd; void *buf; long nbytes; } arg = { fd, buf, nbytes };
return RTDM_RTAI_LXRT(RTDM_INDX, SIZARG, __rtdm_read, &arg);
}
static inline ssize_t rt_dev_write(int fd, const void *buf, size_t nbytes)
{
struct { long fd; const void *buf; long nbytes; } arg = { fd, buf, nbytes };
return RTDM_RTAI_LXRT(RTDM_INDX, SIZARG, __rtdm_write, &arg);
}
static inline ssize_t rt_dev_recvmsg(int fd, struct msghdr *msg, int flags)
{
struct { long fd; struct msghdr *msg; long flags; } arg = { fd, msg, flags };
return RTDM_RTAI_LXRT(RTDM_INDX, SIZARG, __rtdm_recvmsg, &arg);
}
static inline ssize_t rt_dev_sendmsg(int fd, const struct msghdr *msg, int flags)
@@ -445,12 +435,7 @@ static inline ssize_t rt_dev_recvfrom(int fd, void *buf, size_t len, int flags,
static inline int rt_dev_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, nanosecs_rel_t timeout)
{
#ifdef CONFIG_RTAI_RTDM_SELECT
struct { long nfds; fd_set *rfds; fd_set *wfds; fd_set *efds; nanosecs_rel_t timeout; } arg = { nfds, rfds, wfds, efds, timeout };
return RTDM_RTAI_LXRT(RTDM_INDX, SIZARG, __rtdm_select, &arg);
#else
return -ENOSYS;
#endif
}
#ifdef __cplusplus

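Each of the user-space rt_dev_* wrappers above follows the same pattern: the call arguments are packed into an on-stack struct and handed to RTDM_RTAI_LXRT(), which issues the LXRT soft syscall selected by the per-function index (__rtdm_open, __rtdm_read, and so on). A minimal user-space usage sketch follows; the device name and the header path are assumptions (they depend on which RTDM driver is actually loaded), and error handling is abbreviated:

#include <stdio.h>
#include <sys/types.h>
#include <rtdm/rtdm.h>   /* assumed location of the rt_dev_*() declarations */

int main(void)
{
	char buf[64];

	/* marshalled through RTDM_RTAI_LXRT with the __rtdm_open index */
	int fd = rt_dev_open("rtdm_example", 0);
	if (fd < 0) {
		fprintf(stderr, "rt_dev_open failed: %d\n", fd);
		return 1;
	}
	ssize_t n = rt_dev_read(fd, buf, sizeof(buf));
	if (n >= 0)
		printf("read %d bytes\n", (int) n);
	rt_dev_close(fd);
	return 0;
}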
View file

@@ -52,23 +52,23 @@ struct xnselector {
static inline void __FD_SET__(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / __NFDBITS__;
unsigned long __rem = __fd % __NFDBITS__;
__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
}
static inline void __FD_CLR__(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / __NFDBITS__;
unsigned long __rem = __fd % __NFDBITS__;
__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
}
static inline int __FD_ISSET__(unsigned long __fd, const __kernel_fd_set *__p)
{
unsigned long __tmp = __fd / __NFDBITS__;
unsigned long __rem = __fd % __NFDBITS__;
return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
}
static inline void __FD_ZERO__(__kernel_fd_set *__p)
@@ -84,77 +84,6 @@ static inline void __FD_ZERO__(__kernel_fd_set *__p)
}
}
#ifdef CONFIG_RTAI_RTDM_SELECT
struct xnselect {
xnqueue_t bindings;
};
#define DECLARE_XNSELECT(name) struct xnselect name
struct xnselect_binding {
struct xnselector *selector;
struct xnselect *fd;
unsigned type;
unsigned bit_index;
xnholder_t link; /* link in selected fds list. */
xnholder_t slink; /* link in selector list */
};
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
void xnselect_init(struct xnselect *select_block);
int xnselect_bind(struct xnselect *select_block,
struct xnselect_binding *binding,
struct xnselector *selector,
unsigned type,
unsigned bit_index,
unsigned state);
int __xnselect_signal(struct xnselect *select_block, unsigned state);
/**
* Signal a file descriptor state change.
*
* @param select_block pointer to an @a xnselect structure representing the file
* descriptor whose state changed;
* @param state new value of the state.
*
* @retval 1 if rescheduling is needed;
* @retval 0 otherwise.
*/
static inline int
xnselect_signal(struct xnselect *select_block, unsigned state)
{
if (!emptyq_p(&select_block->bindings))
return __xnselect_signal(select_block, state);
return 0;
}
void xnselect_destroy(struct xnselect *select_block);
int xnselector_init(struct xnselector *selector);
int xnselect(struct xnselector *selector,
fd_set *out_fds[XNSELECT_MAX_TYPES],
fd_set *in_fds[XNSELECT_MAX_TYPES],
int nfds,
xnticks_t timeout, xntmode_t timeout_mode);
void xnselector_destroy(struct xnselector *selector);
int xnselect_mount(void);
int xnselect_umount(void);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#else /* !CONFIG_RTAI_RTDM_SELECT */
struct xnselector;
#define DECLARE_XNSELECT(name)
#define xnselect_init(block)
@@ -162,7 +91,6 @@ struct xnselector;
({ -EBADF; })
#define xnselect_signal(block, state) ({ int __ret = 0; __ret; })
#define xnselect_destroy(block)
#endif /* !CONFIG_RTAI_RTDM_SELECT */
/*@}*/

View file

@@ -30,18 +30,12 @@
#include <rtai_schedcore.h>
#define CONFIG_RTAI_OPT_PERVASIVE
#ifndef CONFIG_RTAI_DEBUG_RTDM
#define CONFIG_RTAI_DEBUG_RTDM 0
#endif
#define RTAI_DEBUG(subsystem) (CONFIG_RTAI_DEBUG_##subsystem > 0)
#define RTAI_ASSERT(subsystem, cond, action) do { \
if (unlikely(CONFIG_RTAI_DEBUG_##subsystem > 0 && !(cond))) { \
xnlogerr("assertion failed at %s:%d (%s)\n", __FILE__, __LINE__, (#cond)); \
action; \
} \
} while(0)
@@ -55,33 +49,33 @@
of specific assertions we care of.
*/
#define xnpod_root_p() (!current->rtai_tskext(TSKEXT0) || !((RT_TASK *)(current->rtai_tskext(TSKEXT0)))->is_hard)
#define xnshadow_thread(t) ((xnthread_t *)current->rtai_tskext(TSKEXT0))
#define rthal_local_irq_test() (!rtai_save_flags_irqbit())
#define rthal_local_irq_enable rtai_sti
#define rthal_domain rtai_domain
#define rthal_local_irq_disabled() \
({ \
unsigned long __flags, __ret; \
local_irq_save_hw_smp(__flags); \
__ret = ipipe_test_pipeline_from(&rthal_domain); \
local_irq_restore_hw_smp(__flags); \
__ret; \
})
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define _MODULE_PARM_STRING_charp "s"
#define compat_module_param_array(name, type, count, perm) \
static inline void *__check_existence_##name(void) { return &name; } \
MODULE_PARM(name, "1-" __MODULE_STRING(count) _MODULE_PARM_STRING_##type)
typedef unsigned long phys_addr_t;
#else
#define compat_module_param_array(name, type, count, perm) \
module_param_array(name, type, NULL, perm)
#endif
@@ -105,7 +99,7 @@ typedef struct { volatile unsigned long lock[2]; } xnlock_t;
#ifndef list_first_entry
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
#endif
#ifndef local_irq_save_hw_smp
@@ -120,9 +114,9 @@ typedef struct { volatile unsigned long lock[2]; } xnlock_t;
#ifdef CONFIG_SMP
#define DECLARE_XNLOCK(lock) xnlock_t lock
#define DECLARE_EXTERN_XNLOCK(lock) extern xnlock_t lock
#define DEFINE_XNLOCK(lock) xnlock_t lock = XNARCH_LOCK_UNLOCKED
#define DEFINE_PRIVATE_XNLOCK(lock) static DEFINE_XNLOCK(lock)
static inline void xnlock_init(xnlock_t *lock)
@@ -189,9 +183,9 @@ static inline void xnlock_put_irqrestore(xnlock_t *lock, spl_t flags)
#define DEFINE_XNLOCK(lock)
#define DEFINE_PRIVATE_XNLOCK(lock)
#define xnlock_init(lock) do { } while(0)
#define xnlock_get(lock) rtai_cli()
#define xnlock_put(lock) rtai_sti()
#define xnlock_get_irqsave(lock, flags) rtai_save_flags_and_cli(flags)
#define xnlock_put_irqrestore(lock, flags) rtai_restore_flags(flags)
@@ -209,7 +203,7 @@ static inline void xnlock_put_irqrestore(xnlock_t *lock, spl_t flags)
#define xnprintf(fmt, args...) printk(KERN_INFO XNARCH_PROMPT fmt, ##args)
#define xnlogerr(fmt, args...) printk(KERN_ERR XNARCH_PROMPT fmt, ##args)
#define xnlogwarn xnlogerr
// user space access (taken from Linux)
@@ -360,19 +354,16 @@ typedef atomic_t atomic_counter_t;
typedef RTIME xnticks_t;
typedef struct xnstat_exectime {
xnticks_t start;
xnticks_t total;
} xnstat_exectime_t;
typedef struct xnstat_counter {
int counter;
} xnstat_counter_t;
#define xnstat_counter_inc(c) ((c)->counter++)
typedef struct xnintr {
#ifdef CONFIG_RTAI_RTDM_SHIRQ
struct xnintr *next;
#endif /* CONFIG_RTAI_RTDM_SHIRQ */
unsigned unhandled;
xnisr_t isr;
void *cookie;
@@ -434,14 +425,14 @@ extern unsigned long IsolCpusMask;
// support for RTDM timers
struct rtdm_timer_struct {
struct rtdm_timer_struct *next, *prev;
int priority, cpuid;
RTIME firing_time, period;
void (*handler)(unsigned long);
unsigned long data;
#ifdef CONFIG_RTAI_LONG_TIMED_LIST
rb_root_t rbr;
rb_node_t rbn;
#endif
};
@@ -455,18 +446,18 @@ typedef struct rtdm_timer_struct xntimer_t;
/* Timer modes */
typedef enum xntmode {
XN_RELATIVE,
XN_ABSOLUTE,
XN_REALTIME
} xntmode_t;
#define xntbase_ns2ticks(rtdm_tbase, expiry) nano2count(expiry)
static inline void xntimer_init(xntimer_t *timer, void (*handler)(xntimer_t *))
{
memset(timer, 0, sizeof(struct rtdm_timer_struct));
timer->handler = (void *)handler;
timer->data = (unsigned long)timer;
timer->next = timer->prev = timer;
}
@@ -479,12 +470,12 @@ static inline int xntimer_start(xntimer_t *timer, xnticks_t value, xnticks_t int
static inline void xntimer_destroy(xntimer_t *timer)
{
rt_timer_remove(timer);
}
static inline void xntimer_stop(xntimer_t *timer)
{
rt_timer_remove(timer);
}
// support for use in RTDM usage testing found in RTAI SHOWROOM CVS
@@ -492,11 +483,11 @@ static inline void xntimer_stop(xntimer_t *timer)
static inline unsigned long long xnarch_ulldiv(unsigned long long ull, unsigned
long uld, unsigned long *r)
{
unsigned long rem = do_div(ull, uld);
if (r) {
*r = rem;
}
return ull;
}
// support for RTDM select
@@ -559,15 +550,15 @@ static inline int emptyq_p(xnqueue_t *queue)
#define xnpod_schedule rt_schedule_readied
#define xnthread_t RT_TASK
#define xnpod_current_thread _rt_whoami
#define xnthread_test_info rt_task_test_taskq_retval
#define xnsynch_t TASKQ
#define xnsynch_init(s, f, p) rt_taskq_init(s, f)
#define xnsynch_destroy rt_taskq_delete
#define xnsynch_wakeup_one_sleeper rt_taskq_ready_one
#define xnsynch_flush rt_taskq_ready_all
static inline void xnsynch_sleep_on(void *synch, xnticks_t timeout, xntmode_t timeout_mode)
{
if (timeout == XN_INFINITE) {
@@ -594,55 +585,39 @@ static inline void xnsynch_sleep_on(void *synch, xnticks_t timeout, xntmode_t ti
#define rthal_apc_schedule(apc) \
rt_pend_linux_srq((apc))
#ifdef CONFIG_RTAI_RTDM_SELECT
#define SELECT_SIGNAL(select_block, state) \
do { \
spl_t flags; \
xnlock_get_irqsave(&nklock, flags); \
if (xnselect_signal(select_block, state) && state) { \
xnpod_schedule(); \
} \
xnlock_put_irqrestore(&nklock, flags); \
} while (0)
#else
#define SELECT_SIGNAL(select_block, state) do { } while (0)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#define __WORK_INITIALIZER(n,f,d) { \
.list = { &(n).list, &(n).list }, \
.sync = 0, \
.routine = (f), \
.data = (d), \
}
#define DECLARE_WORK(n,f,d) struct tq_struct n = __WORK_INITIALIZER(n, f, d)
#define DECLARE_WORK_NODATA(n, f) DECLARE_WORK(n, f, NULL)
#define DECLARE_WORK_FUNC(f) void f(void *cookie)
#define DECLARE_DELAYED_WORK_NODATA(n, f) DECLARE_WORK(n, f, NULL)
#define schedule_delayed_work(work, delay) do { \
if (delay) { \
set_current_state(TASK_UNINTERRUPTIBLE); \
schedule_timeout(delay); \
} \
schedule_task(work); \
} while (0)
#else
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
#define DECLARE_WORK_NODATA(f, n) DECLARE_WORK(f, n, NULL)
#define DECLARE_WORK_FUNC(f) void f(void *cookie)
#define DECLARE_DELAYED_WORK_NODATA(n, f) DECLARE_DELAYED_WORK(n, f, NULL)
#else /* >= 2.6.20 */
#define DECLARE_WORK_NODATA(f, n) DECLARE_WORK(f, n)
#define DECLARE_WORK_FUNC(f) void f(struct work_struct *work)
#define DECLARE_DELAYED_WORK_NODATA(n, f) DECLARE_DELAYED_WORK(n, f)
#endif /* >= 2.6.20 */

View file

@@ -1,8 +1,4 @@
OPTDIRS =
if CONFIG_RTAI_BITS
OPTDIRS += bits
endif
if CONFIG_RTAI_FIFOS
OPTDIRS += fifos
@@ -24,10 +20,6 @@ if CONFIG_RTAI_MBX
OPTDIRS += mbx
endif
if CONFIG_RTAI_TBX
OPTDIRS += tbx
endif
if CONFIG_RTAI_MQ
OPTDIRS += mq
endif

View file

@@ -1,48 +0,0 @@
moduledir = @RTAI_MODULE_DIR@
modext = @RTAI_MODULE_EXT@
libbits_a_SOURCES = bits.c
if CONFIG_KBUILD
if CONFIG_RTAI_BITS_BUILTIN
rtai_bits.ko:
else
rtai_bits.ko: @RTAI_KBUILD_ENV@
rtai_bits.ko: $(libbits_a_SOURCES)
@RTAI_KBUILD_TOP@ \
@RTAI_KBUILD_CMD@ \
@RTAI_KBUILD_BOTTOM@
clean-local:
@RTAI_KBUILD_CLEAN@
endif
else
noinst_LIBRARIES = libbits.a
libbits_a_AR = ar cru
AM_CPPFLAGS = \
@RTAI_KMOD_CFLAGS@ \
-I$(top_srcdir)/base/include \
-I../../include
rtai_bits.o: libbits.a
$(LD) --whole-archive $< -r -o $@
endif
all-local: rtai_bits$(modext)
if !CONFIG_RTAI_BITS_BUILTIN
if CONFIG_RTAI_OLD_FASHIONED_BUILD
$(mkinstalldirs) $(top_srcdir)/modules
$(INSTALL_DATA) $^ $(top_srcdir)/modules
endif
install-exec-local: rtai_bits$(modext)
$(mkinstalldirs) $(DESTDIR)$(moduledir)
$(INSTALL_DATA) $< $(DESTDIR)$(moduledir)
endif
EXTRA_DIST = Makefile.kbuild

View file

@@ -1,8 +0,0 @@
EXTRA_CFLAGS += -I$(rtai_srctree)/base/include \
-I$(src)/../../include \
-I$(src)/../../.. \
-D__IN_RTAI__
obj-m += rtai_bits.o
rtai_bits-objs := $(rtai_objs)

View file

@@ -1,104 +0,0 @@
Bits
====
This module provides helper functions allowing compound synchronizations based
on AND/ORs on a 32bits unsigned long. In other OSes similar objects are often
referred as flags/events. Their use is similar to that of semaphores except
that signal/waits are not related just to a simple counter but depends on a
combinations of bits set.
Test operations provided:
Single tests:
#define ALL_SET: test if an entire mask is set
#define ANY_SET: test if any entry of a mask is set
#define ALL_CLR: test if an entire mask is cleared
#define ANY_CLR: test if any entry of a mask is cleared
Combined tests (meaning easily inferred from macros names):
#define ALL_SET_AND_ANY_SET
#define ALL_SET_AND_ALL_CLR
#define ALL_SET_AND_ANY_CLR
#define ANY_SET_AND_ALL_CLR
#define ANY_SET_AND_ANY_CLR
#define ALL_CLR_AND_ANY_CLR
#define ALL_SET_OR_ANY_SET
#define ALL_SET_OR_ALL_CLR
#define ALL_SET_OR_ANY_CLR
#define ANY_SET_OR_ALL_CLR
#define ANY_SET_OR_ANY_CLR
#define ALL_CLR_OR_ANY_CLR
Bit operations provided:
#define SET_BITS: set bits according to the given mask
#define CLR_BITS: clear bits according to the given mask
#define SET_CLR_BITS: combined operation
#define NOP_BITS: do nothing
Services provided:
struct rt_bits_struct {
struct rt_queue queue; // must be first in struct
int magic;
int type; // to align mask to semaphore count, for easier uspace init
unsigned long mask;
};
typedef struct rt_bits_struct BITS;
To be noticed. The int variable type above is inserted to keep
rt_bits_struct aligned with the initial part of SEM, to ease use in user
space. For the same reason BITS_MAGIC and error returns are also the same
as those of SEM. Not so strange, bits code and APIs are very similar to
those of semaphores.
#include <rtai_bits.h>
- void rt_bits_init(BITS *bits, unsigned long mask)
create and initialize the bits structure pointed by bits support
structure, setting bits mask to mask.
- int rt_bits_delete(BITS *bits)
delete bits; returns: BITS_ERR if bits has already been deleted, 0 if OK.
- unsigned long rt_get_bits(BITS *bits)
get the actual value of bits mask.
- unsigned long rt_bits_signal(BITS *bits, int setfun, unsigned long masks)
execute setfun, any bits operation above, or/anding masks onto the actual bits
mask, schedule any task blocked on bits if the new bits mask meets its request;
returns the value of bits after executing setfun; in case of combined operations
masks is to be cast to a pointer of a two elements array of unsigned longs
containing the masks to be used for the combined setfun used.
- int rt_bits_reset(BITS *bits, unsigned long mask)
unconditionally schedule any task blocked on bits and reset its mask to mask;
returns the value of bits mask before being reset to mask.
- int rt_bits_wait(BITS *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask)
test bits mask against testmasks according to testfun, any of the test funs
above, if the test is not satisfied block the task; whenever the condition is
met execute exitfun, any bits operation above, using exitmasks, save the
mask resulting after the whole processing in the variable pointed to by
resulting_mask; returns: BITS_ERR if the task was blocked and resumed because
bits was deleted, 0 if OK; in case of combined operations testmasks and/or
exitmasks are to be cast to pointers of two elements arrays of unsigned
longs containing the masks to be used for the combined tests/executions.
- int rt_bits_wait_if(BITS *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask)
as rt_bits_wait but does not block if testfun is not satisfied; returns 1 if
it succeeded, 0 if it failed.
- int rt_bits_wait_until(BITS *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, RTIME time, unsigned long *resulting_mask)
as rt_bits_wait but waits at most till time expires; returns the same values
also, BITS_TIMOUT if the task blocked and has been resumed because a timeout
occurred.
- unsigned long rt_bits_wait_timed(BITS *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, RTIME delay, unsigned long *resulting_mask)
as rt_bits_wait_until but waits at most for delay to meet the required
condition.
As usual, comments and bug fixes are welcomed.
Paolo Mantegazza (mantegazza@aero.polimi.it)

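The README above documents the API of the bits module this changeset removes. For context, a minimal kernel-space sketch of the calls it describes, using only the names listed in the text; task setup, module boilerplate, and error checking are omitted, and the bit values are illustrative only:

/* Sketch only (not part of the diff): exercises the rt_bits API documented
 * above from RTAI kernel-space code. */
#include <rtai_bits.h>

static BITS ready_bits;

/* called once before the real-time tasks start */
static void bits_setup(void)
{
	rt_bits_init(&ready_bits, 0);
}

/* producer side: OR bit 0 into the mask, readying any waiter whose test is now met */
static void producer_step(void)
{
	rt_bits_signal(&ready_bits, SET_BITS, 0x1);
}

/* consumer side: block until bits 0 and 1 are both set, then clear them */
static void consumer_step(void)
{
	unsigned long resulting_mask;

	rt_bits_wait(&ready_bits, ALL_SET, 0x3, CLR_BITS, 0x3, &resulting_mask);
}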
View file

@@ -1,493 +0,0 @@
/*
* Copyright (C) 1999-2003 Paolo Mantegazza <mantegazza@aero.polimi.it>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/version.h>
#include <asm/uaccess.h>
#include <rtai_schedcore.h>
#include <rtai_sched.h>
#include <rtai_lxrt.h>
#include <rtai_bits.h>
MODULE_LICENSE("GPL");
#define MASK0(x) ((unsigned long *)&(x))[0]
#define MASK1(x) ((unsigned long *)&(x))[1]
static int all_set(BITS *bits, unsigned long mask)
{
return (bits->mask & mask) == mask;
}
static int any_set(BITS *bits, unsigned long mask)
{
return (bits->mask & mask);
}
static int all_clr(BITS *bits, unsigned long mask)
{
return (~bits->mask & mask) == mask;
}
static int any_clr(BITS *bits, unsigned long mask)
{
return (~bits->mask & mask);
}
static int all_set_and_any_set(BITS *bits, unsigned long masks)
{
return (bits->mask & MASK1(masks)) && (bits->mask & MASK0(masks)) == MASK0(masks);
}
static int all_set_and_all_clr(BITS *bits, unsigned long masks)
{
return (bits->mask & MASK0(masks)) == MASK0(masks) && (~bits->mask & MASK1(masks)) == MASK1(masks);
}
static int all_set_and_any_clr(BITS *bits, unsigned long masks)
{
return (bits->mask & MASK0(masks)) == MASK0(masks) && (~bits->mask & MASK1(masks));
}
static int any_set_and_all_clr(BITS *bits, unsigned long masks)
{
return (bits->mask & MASK0(masks)) && (~bits->mask & MASK1(masks)) == MASK1(masks);
}
static int any_set_and_any_clr(BITS *bits, unsigned long masks)
{
return (bits->mask & MASK0(masks)) && (~bits->mask & MASK1(masks));
}
static int all_clr_and_any_clr(BITS *bits, unsigned long masks)
{
return (~bits->mask & MASK1(masks)) && (~bits->mask & MASK0(masks)) == MASK0(masks);
}
static int all_set_or_any_set(BITS *bits, unsigned long masks)
{
return (bits->mask & MASK1(masks)) || (bits->mask & MASK0(masks)) == MASK0(masks);
}
static int all_set_or_all_clr(BITS *bits, unsigned long masks)
{
return (bits->mask & MASK0(masks)) == MASK0(masks) || (~bits->mask & MASK1(masks)) == MASK1(masks);
}
static int all_set_or_any_clr(BITS *bits, unsigned long masks)
{
return (bits->mask & MASK0(masks)) == MASK0(masks) || (~bits->mask & MASK1(masks));
}
static int any_set_or_all_clr(BITS *bits, unsigned long masks)
{
return (bits->mask & MASK0(masks)) || (~bits->mask & MASK1(masks)) == MASK1(masks);
}
static int any_set_or_any_clr(BITS *bits, unsigned long masks)
{
return (bits->mask & MASK0(masks)) || (~bits->mask & MASK1(masks));
}
static int all_clr_or_any_clr(BITS *bits, unsigned long masks)
{
return (~bits->mask & MASK1(masks)) || (~bits->mask & MASK0(masks)) == MASK0(masks);
}
static void set_bits_mask(BITS *bits, unsigned long mask)
{
bits->mask |= mask;
}
static void clr_bits_mask(BITS *bits, unsigned long mask)
{
bits->mask &= ~mask;
}
static void set_clr_bits_mask(BITS *bits, unsigned long masks)
{
bits->mask = (bits->mask | MASK0(masks)) & ~MASK1(masks);
}
static void nop_fun(BITS *bits, unsigned long mask)
{
}
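/*
 * Dispatch tables indexed by the testfun/setfun codes passed in through the
 * API: test_fun[] holds the wakeup predicates, exec_fun[] the actions applied
 * to the bits mask (set, clear, set-and-clear, or do nothing).
 */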
static int (*test_fun[])(BITS *, unsigned long) = {
all_set, any_set, all_clr, any_clr,
all_set_and_any_set, all_set_and_all_clr, all_set_and_any_clr,
any_set_and_all_clr, any_set_and_any_clr,
all_clr_and_any_clr,
all_set_or_any_set, all_set_or_all_clr, all_set_or_any_clr,
any_set_or_all_clr, any_set_or_any_clr,
all_clr_or_any_clr
};
static void (*exec_fun[])(BITS *, unsigned long) = {
set_bits_mask, clr_bits_mask,
set_clr_bits_mask,
nop_fun
};
#define CHECK_BITS_MAGIC(bits) \
do { if (bits->magic != RT_BITS_MAGIC) return RTE_OBJINV; } while (0)
void rt_bits_init(BITS *bits, unsigned long mask)
{
bits->magic = RT_BITS_MAGIC;
bits->queue.prev = &(bits->queue);
bits->queue.next = &(bits->queue);
bits->queue.task = 0;
bits->mask = mask;
}
int rt_bits_delete(BITS *bits)
{
unsigned long flags, schedmap;
RT_TASK *task;
QUEUE *q;
CHECK_BITS_MAGIC(bits);
schedmap = 0;
q = &bits->queue;
flags = rt_global_save_flags_and_cli();
bits->magic = 0;
while ((q = q->next) != &bits->queue && (task = q->task)) {
rem_timed_task(task);
if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
task->blocked_on = RTP_OBJREM;
enq_ready_task(task);
#ifdef CONFIG_SMP
set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#endif
}
}
RT_SCHEDULE_MAP(schedmap);
rt_global_restore_flags(flags);
return 0;
}
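/*
 * While a task is blocked here its retval field temporarily points to a
 * two-word buffer on the waiter's stack (bits_test[] in _rt_bits_wait and
 * _rt_bits_wait_until) holding its test function index and test mask, so
 * that rt_bits_signal() can re-evaluate the waiter's condition.
 */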
#define TEST_BUF(x, y) do { (x)->retval = (unsigned long)(y); } while (0)
#define TEST_FUN(x) ((long *)((unsigned long)(x)->retval))[0]
#define TEST_MASK(x) ((unsigned long *)((unsigned long)(x)->retval))[1]
RTAI_SYSCALL_MODE unsigned long rt_get_bits(BITS *bits)
{
return bits->mask;
}
RTAI_SYSCALL_MODE unsigned long rt_bits_reset(BITS *bits, unsigned long mask)
{
unsigned long flags, schedmap, oldmask;
RT_TASK *task;
QUEUE *q;
CHECK_BITS_MAGIC(bits);
schedmap = 0;
q = &bits->queue;
flags = rt_global_save_flags_and_cli();
oldmask = bits->mask;
bits->mask = mask;
while ((q = q->next) != &bits->queue) {
dequeue_blocked(task = q->task);
rem_timed_task(task);
if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
enq_ready_task(task);
#ifdef CONFIG_SMP
set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#endif
}
}
bits->queue.prev = bits->queue.next = &bits->queue;
RT_SCHEDULE_MAP(schedmap);
rt_global_restore_flags(flags);
return oldmask;
}
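/* Apply the exec_fun selected by setfun to the mask, then wake every queued
   task whose stored test condition is now satisfied; returns the mask as it
   was right after the update. */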
RTAI_SYSCALL_MODE unsigned long rt_bits_signal(BITS *bits, int setfun, unsigned long masks)
{
unsigned long flags, schedmap;
RT_TASK *task;
QUEUE *q;
CHECK_BITS_MAGIC(bits);
schedmap = 0;
q = &bits->queue;
flags = rt_global_save_flags_and_cli();
exec_fun[setfun](bits, masks);
masks = bits->mask;
while ((q = q->next) != &bits->queue) {
task = q->task;
if (test_fun[TEST_FUN(task)](bits, TEST_MASK(task))) {
dequeue_blocked(task);
rem_timed_task(task);
if (task->state != RT_SCHED_READY && (task->state &= ~(RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED)) == RT_SCHED_READY) {
enq_ready_task(task);
#ifdef CONFIG_SMP
set_bit(task->runnable_on_cpus & 0x1F, &schedmap);
#endif
}
}
}
RT_SCHEDULE_MAP(schedmap);
rt_global_restore_flags(flags);
return masks;
}
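/* If the test already holds, just apply exitfun; otherwise queue the caller
   on the BITS object and suspend it until a signal or reset releases it, the
   task is unblocked, or the object is deleted. "space" is non-zero for kernel
   callers, zero for user space (resulting_mask copy-out). */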
RTAI_SYSCALL_MODE int _rt_bits_wait(BITS *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask, int space)
{
RT_TASK *rt_current;
unsigned long flags, mask = 0;
int retval;
CHECK_BITS_MAGIC(bits);
flags = rt_global_save_flags_and_cli();
if (!test_fun[testfun](bits, testmasks)) {
void *retpnt;
long bits_test[2];
rt_current = RT_CURRENT;
TEST_BUF(rt_current, bits_test);
TEST_FUN(rt_current) = testfun;
TEST_MASK(rt_current) = testmasks;
rt_current->state |= RT_SCHED_SEMAPHORE;
rem_ready_current(rt_current);
enqueue_blocked(rt_current, &bits->queue, 1);
rt_schedule();
if (unlikely((retpnt = rt_current->blocked_on) != NULL)) {
if (likely(retpnt != RTP_OBJREM)) {
dequeue_blocked(rt_current);
retval = RTE_UNBLKD;
} else {
rt_current->prio_passed_to = NULL;
retval = RTE_OBJREM;
}
goto retmask;
}
}
retval = 0;
mask = bits->mask;
exec_fun[exitfun](bits, exitmasks);
retmask:
rt_global_restore_flags(flags);
if (resulting_mask) {
if (space) {
*resulting_mask = mask;
} else {
rt_copy_to_user(resulting_mask, &mask, sizeof(mask));
}
}
return retval;
}
RTAI_SYSCALL_MODE int _rt_bits_wait_if(BITS *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, unsigned long *resulting_mask, int space)
{
unsigned long flags, mask;
int retval;
CHECK_BITS_MAGIC(bits);
flags = rt_global_save_flags_and_cli();
mask = bits->mask;
if (test_fun[testfun](bits, testmasks)) {
exec_fun[exitfun](bits, exitmasks);
retval = 1;
} else {
retval = 0;
}
rt_global_restore_flags(flags);
if (resulting_mask) {
if (space) {
*resulting_mask = mask;
} else {
rt_copy_to_user(resulting_mask, &mask, sizeof(mask));
}
}
return retval;
}
RTAI_SYSCALL_MODE int _rt_bits_wait_until(BITS *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, RTIME time, unsigned long *resulting_mask, int space)
{
RT_TASK *rt_current;
unsigned long flags, mask = 0;
int retval;
CHECK_BITS_MAGIC(bits);
flags = rt_global_save_flags_and_cli();
if (!test_fun[testfun](bits, testmasks)) {
void *retpnt;
long bits_test[2];
rt_current = RT_CURRENT;
TEST_BUF(rt_current, bits_test);
TEST_FUN(rt_current) = testfun;
TEST_MASK(rt_current) = testmasks;
rt_current->blocked_on = &bits->queue;
if ((rt_current->resume_time = time) > get_time()) {
rt_current->state |= (RT_SCHED_SEMAPHORE | RT_SCHED_DELAYED);
rem_ready_current(rt_current);
enqueue_blocked(rt_current, &bits->queue, 1);
enq_timed_task(rt_current);
rt_schedule();
} else {
rt_current->queue.prev = rt_current->queue.next = &rt_current->queue;
}
if (unlikely((retpnt = rt_current->blocked_on) != NULL)) {
if (likely(retpnt != RTP_OBJREM)) {
dequeue_blocked(rt_current);
retval = likely(retpnt > RTP_HIGERR) ? RTE_TIMOUT : RTE_UNBLKD;
} else {
rt_current->prio_passed_to = NULL;
retval = RTE_OBJREM;
}
goto retmask;
}
}
retval = 0;
mask = bits->mask;
exec_fun[exitfun](bits, exitmasks);
retmask:
rt_global_restore_flags(flags);
if (resulting_mask) {
if (space) {
*resulting_mask = mask;
} else {
rt_copy_to_user(resulting_mask, &mask, sizeof(mask));
}
}
return retval;
}
RTAI_SYSCALL_MODE int _rt_bits_wait_timed(BITS *bits, int testfun, unsigned long testmasks, int exitfun, unsigned long exitmasks, RTIME delay, unsigned long *resulting_mask, int space)
{
return _rt_bits_wait_until(bits, testfun, testmasks, exitfun, exitmasks, get_time() + delay, resulting_mask, space);
}
/* +++++++++++++++++++++++++++++ NAMED BITS +++++++++++++++++++++++++++++++++ */
#include <rtai_registry.h>
RTAI_SYSCALL_MODE BITS *rt_named_bits_init(const char *bits_name, unsigned long mask)
{
BITS *bits;
unsigned long name;
if ((bits = rt_get_adr(name = nam2num(bits_name)))) {
return bits;
}
if ((bits = rt_malloc(sizeof(BITS)))) { /* allocate a BITS object, not a SEM */
rt_bits_init(bits, mask);
if (rt_register(name, bits, IS_BIT, 0)) {
return bits;
}
rt_bits_delete(bits);
rt_free(bits); /* free only what was actually allocated */
}
return NULL;
}
RTAI_SYSCALL_MODE int rt_named_bits_delete(BITS *bits)
{
if (!rt_bits_delete(bits)) {
rt_free(bits);
}
return rt_drg_on_adr(bits);
}
RTAI_SYSCALL_MODE void *rt_bits_init_u(unsigned long name, unsigned long mask)
{
BITS *bits;
if (rt_get_adr(name)) {
return NULL;
}
if ((bits = rt_malloc(sizeof(BITS)))) {
rt_bits_init(bits, mask);
if (rt_register(name, bits, IS_BIT, current)) {
return bits;
} else {
rt_free(bits);
}
}
return NULL;
}
RTAI_SYSCALL_MODE int rt_bits_delete_u(BITS *bits)
{
if (rt_bits_delete(bits)) {
return -EFAULT;
}
rt_free(bits);
return rt_drg_on_adr(bits);
}
/* ++++++++++++++++++++++++++++ BITS ENTRIES ++++++++++++++++++++++++++++++++ */
struct rt_native_fun_entry rt_bits_entries[] = {
{ { 0, rt_bits_init_u }, BITS_INIT },
{ { 0, rt_bits_delete_u }, BITS_DELETE },
{ { 0, rt_named_bits_init }, NAMED_BITS_INIT },
{ { 0, rt_named_bits_delete }, NAMED_BITS_DELETE },
{ { 1, rt_get_bits }, BITS_GET },
{ { 1, rt_bits_reset }, BITS_RESET },
{ { 1, rt_bits_signal }, BITS_SIGNAL },
{ { 1, _rt_bits_wait }, BITS_WAIT },
{ { 1, _rt_bits_wait_if }, BITS_WAIT_IF },
{ { 1, _rt_bits_wait_until }, BITS_WAIT_UNTIL },
{ { 1, _rt_bits_wait_timed }, BITS_WAIT_TIMED },
{ { 0, 0 }, 000 }
};
extern int set_rt_fun_entries(struct rt_native_fun_entry *entry);
extern void reset_rt_fun_entries(struct rt_native_fun_entry *entry);
int __rtai_bits_init(void)
{
return set_rt_fun_entries(rt_bits_entries);
}
void __rtai_bits_exit(void)
{
reset_rt_fun_entries(rt_bits_entries);
}
#ifndef CONFIG_RTAI_BITS_BUILTIN
module_init(__rtai_bits_init);
module_exit(__rtai_bits_exit);
#endif /* !CONFIG_RTAI_BITS_BUILTIN */
#ifdef CONFIG_KBUILD
EXPORT_SYMBOL(rt_bits_init);
EXPORT_SYMBOL(rt_bits_delete);
EXPORT_SYMBOL(rt_get_bits);
EXPORT_SYMBOL(rt_bits_reset);
EXPORT_SYMBOL(rt_bits_signal);
EXPORT_SYMBOL(_rt_bits_wait);
EXPORT_SYMBOL(_rt_bits_wait_if);
EXPORT_SYMBOL(_rt_bits_wait_until);
EXPORT_SYMBOL(_rt_bits_wait_timed);
EXPORT_SYMBOL(rt_named_bits_init);
EXPORT_SYMBOL(rt_named_bits_delete);
EXPORT_SYMBOL(rt_bits_init_u);
EXPORT_SYMBOL(rt_bits_delete_u);
#endif /* CONFIG_KBUILD */


@ -61,19 +61,19 @@ ACKNOWLEDGEMENTS:
*
* <CENTER><TABLE>
* <TR><TD> Called from RT task </TD><TD> Called from Linux process </TD></TR>
* <TR><TD> #rtf_create </TD><TD> #rtf_open_sized <BR>
* [open] </TD></TR>
* <TR><TD> #rtf_destroy </TD><TD> [close] </TD></TR>
* <TR><TD> #rtf_reset </TD><TD> #rtf_reset </TD></TR>
* <TR><TD> #rtf_resize </TD><TD> #rtf_resize </TD></TR>
* <TR><TD> #rtf_get </TD><TD> [read] <BR>
* #rtf_read_timed <BR>
* #rtf_read_all_at_once </TD></TR>
* <TR><TD> #rtf_put </TD><TD> [write] <BR>
* #rtf_write_timed </TD></TR>
* <TR><TD> #rtf_create_handler </TD><TD> </TD></TR>
* <TR><TD> </TD><TD> #rtf_suspend_timed </TD></TR>
* <TR><TD> </TD><TD> #rtf_set_async_sig </TD></TR>
* </TABLE></CENTER>
*
* In Linux, fifos have to be created by :
@ -115,12 +115,12 @@ ACKNOWLEDGEMENTS:
*
* <CENTER><TABLE>
* <TR><TD> Called from RT task </TD><TD> Called from Linux process </TD></TR>
* <TR><TD> #rtf_sem_init </TD><TD> #rtf_sem_init </TD></TR>
* <TR><TD> #rtf_sem_post </TD><TD> #rtf_sem_post </TD></TR>
* <TR><TD> #rtf_sem_trywait </TD><TD> #rtf_sem_wait <BR>
* #rtf_sem_trywait <BR>
* #rtf_sem_timed_wait </TD></TR>
* <TR><TD> #rtf_sem_destroy </TD><TD> #rtf_sem_destroy </TD></TR>
* </TABLE></CENTER>
*
* To add a bit of confusion (J), with respect to RTAI schedulers semaphore
@ -591,7 +591,7 @@ static inline void mbx_init(F_MBX *mbx, int size, char *bufadr)
mbx->size = mbx->frbs = size;
mbx->fbyte = mbx->lbyte = mbx->avbs = 0;
#ifdef CONFIG_SMP
spin_lock_init(&mbx->buflock);
#endif
spin_lock_init(&(mbx->buflock));
}
@ -1577,7 +1577,7 @@ static int rtf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, u
}
case FIONREAD: {
return put_user(fifo[minor].mbx.avbs, (int *)arg);
}
/*
* Support for named FIFOS : Ian Soanes (ians@zentropix.com)
* Based on ideas from Stuart Hughes and David Schleef
@ -1597,10 +1597,10 @@ static int rtf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, u
struct rt_fifo_info_struct info;
info.fifo_number = i;
info.size = fifo[i].mbx.size;
info.opncnt = fifo[i].opncnt;
info.avbs = fifo[i].mbx.avbs;
info.frbs = fifo[i].mbx.frbs;
strncpy(info.name, fifo[i].name, RTF_NAMELEN+1);
rt_copy_to_user(req.ptr + n, &info, sizeof(info));
}
@ -1611,22 +1611,22 @@ static int rtf_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, u
rt_copy_from_user(&args, (void *)arg, sizeof(args));
return rtf_named_create(args.name, args.size);
}
case RTF_CREATE_NAMED: {
char name[RTF_NAMELEN+1];
rt_copy_from_user(name, (void *)arg, RTF_NAMELEN+1);
return rtf_create_named(name);
}
case RTF_NAME_LOOKUP: {
char name[RTF_NAMELEN+1];
rt_copy_from_user(name, (void *)arg, RTF_NAMELEN+1);
return rtf_getfifobyname(name);
}
case TCGETS:
/* Keep isatty() probing silent */
return -ENOTTY;
default : {
printk("RTAI-FIFO: cmd %d is not implemented\n", cmd);
@ -1694,9 +1694,9 @@ RTAI_MODULE_PARM(MaxFifos, int);
static struct rt_fun_entry rtai_fifos_fun[] = {
[_CREATE] = { 0, rtf_create },
[_DESTROY] = { 0, rtf_destroy },
[_PUT] = { 0, rtf_put },
[_GET] = { 0, rtf_get },
[_RESET] = { 0, rtf_reset },
[_RESIZE] = { 0, rtf_resize },
[_SEM_INIT] = { 0, rtf_sem_init },
[_SEM_DESTRY] = { 0, rtf_sem_destroy },
@ -1707,8 +1707,8 @@ static struct rt_fun_entry rtai_fifos_fun[] = {
[_OVERWRITE] = { 0, rtf_ovrwr_put },
[_PUT_IF] = { 0, rtf_put_if },
[_GET_IF] = { 0, rtf_get_if },
[_AVBS] = { 0, rtf_get_avbs },
[_FRBS] = { 0, rtf_get_frbs }
};
static int register_lxrt_fifos_support(void)
@ -1757,7 +1757,7 @@ int __rtai_fifos_init(void)
if (CLASS_DEVICE_CREATE(fifo_class, MKDEV(RTAI_FIFOS_MAJOR, minor), NULL, "rtf%d", minor) == NULL) {
printk("RTAI-FIFO: cannot attach class.\n");
class_destroy(fifo_class);
return -EBUSY;
}
}
#endif
@ -1805,7 +1805,7 @@ void __rtai_fifos_exit(void)
printk("RTAI-FIFO: rtai srq %d illegal or already free.\n", fifo_srq);
}
#ifdef CONFIG_PROC_FS
rtai_proc_fifo_unregister();
#endif
kfree(fifo);
}
@ -1834,7 +1834,7 @@ static int PROC_READ_FUN(rtai_read_fifos)
for (i = 0; i < MAX_FIFOS; i++) {
if (fifo[i].opncnt > 0) {
PROC_PRINT("%-8d %-9d %-10d %-10p %-12s", i,
fifo[i].opncnt, fifo[i].mbx.size,
fifo[i].handler,
fifo[i].malloc_type == 'v'
? "vmalloc" : "kmalloc"
@ -1850,12 +1850,12 @@ PROC_READ_OPEN_OPS(rtai_fifos_fops, rtai_read_fifos);
static int rtai_proc_fifo_register(void)
{
struct proc_dir_entry *proc_fifo_ent;
proc_fifo_ent = CREATE_PROC_ENTRY("fifos", S_IFREG|S_IRUGO|S_IWUSR, rtai_proc_root, &rtai_fifos_fops);
if (!proc_fifo_ent) {
printk("Unable to initialize /proc/rtai/fifos\n");
return(-1);
}
SET_PROC_READ_ENTRY(proc_fifo_ent, rtai_read_fifos);
PROC_PRINT_DONE;
}
@ -1885,11 +1885,11 @@ int rtf_named_create(const char *name, int size)
if (!strncmp(name, fifo[minor].name, RTF_NAMELEN)) {
break;
} else if (!fifo[minor].opncnt && !fifo[minor].name[0]) {
strncpy(fifo[minor].name, name, RTF_NAMELEN + 1);
rtf_spin_unlock_irqrestore(flags, rtf_name_lock);
if ((err = rtf_create(minor, size)) < 0) {
fifo[minor].name[0] = 0;
return err;
}
return minor;
}

Some files were not shown because too many files have changed in this diff.