<launch>
<!--...................................
- Load URDF to param server
- Robot State Publisher
- Rviz with description from file.rviz
- Joint State Publisher with GUI
.......................................-->
<arg name="model" default="$(find visbot)/urdf/visbot.urdf"/>
<arg name="gui" default="True" />
<param name="robot_description" textfile="$(arg model)" />
<param name="use_gui" value="$(arg gui)"/>
<node name="robot_state_publisher" pkg="robot_state_publisher" type="state_publisher" />
<node name="rviz" pkg="rviz" type="rviz" args="-d $(find visbot)/urdf.rviz" respawn="true"/>
<!--<node name="rviz" pkg="rviz" type="rviz" args="-d $(find visbot)/urdf.rviz" required="true" />-->
<node name="joint_state_publisher" pkg="joint_state_publisher" type="joint_state_publisher">
<param name="/zeros/YAW" value="1.570796327"/>
<param name="/zeros/PITCH" value="0.785398163"/>
<rosparam param="/source_list"> [/WheelsState] </rosparam>
</node>
<!--.......................................................................-->
<!--........................................................................
- jointrepnode: republishes the servo state info
- rosserial to communicate with the head Arduino
- ros_arduino to communicate with the base Arduino
............................................................................-->
<node name="jointrepnode" pkg="visbot" type="jointrepnode"/> <!--nombre de nodo y ejecutable identicos-->
<node name="serial_node" pkg="rosserial_python" type="serial_node.py" args="/dev/ttyACM3" respawn="true"/>
<!--respawn restarts the node automatically if it exits-->
<node name="ros_arduino_node" pkg="ros_arduino_python" type="arduino_node.py" output="screen" respawn="true"
respawn_delay="10">
<rosparam file="$(find ros_arduino_python)/config/my_arduino_params.yaml" command="load" />
<rosparam param="/ros_arduino_node/port"> /dev/ttyUSB0 </rosparam>
<!--overridden here so the port parameter can be changed quickly-->
</node>
<!--.......................................................................-->
<!--
- Call the services that start the cameras
- Republishers for the left and right camera images
- stereo image proc
-->
<node name="ss" args="call /left/camera/start_capture" pkg="rosservice" type="rosservice"/>
<node name="sss" args="call /right/camera/start_capture" pkg="rosservice" type="rosservice"/>
<node name="republishLeft" args="compressed in:=/left/camera/image/ raw out:=/stereo/left/image_raw" pkg="image_transport" type="republish"/>
<node name="republishRight" args="compressed in:=/right/camera/image/ raw out:=/stereo/right/image_raw" pkg="image_transport" type="republish"/>
<node ns="stereo" name="stereo_image_proc" args="_approximate_sync:=True _queue_size:=5 _disparity_range:=200" pkg="stereo_image_proc" type="stereo_image_proc"/>
<!--.........................................................................-->
<!-- Choose visualization -->
<arg name="rviz" default="true" />
<arg name="rtabmapviz" default="true" />
<param name="use_sim_time" type="bool" value="False"/>
<!--<arg name="pi/2" value="1.5707963267948966" />-->
<!--<arg name="optical_rotate" value="0 0 0 -$(arg pi/2) 0 -$(arg pi/2)" />-->
<!--<node pkg="tf" type="static_transform_publisher" name="base_to_camera_tf" args="$(arg optical_rotate) /base_link /camera 100" />-->
<!--<node pkg="tf" type="static_transform_publisher" name="base_to_camera_tf" args="0.0 0.0 0.0 0.0 0.0 0.0 /base_link /camera 100" />-->
<!-- Stereo Odometry -->
<!--<node pkg="viso2_ros" type="stereo_odometer" name="stereo_odometer" output="screen">-->
<!--<remap from="stereo" to="/stereo"/>-->
<!--<remap from="image" to="image_rect"/>-->
<!--<remap from="stereo_odometer/odometry" to="/odometry"/>-->
<!--<param name="base_link_frame_id" value="base_link"/>-->
<!--<param name="odom_frame_id" value="odom"/>-->
<!--<param name="ref_frame_change_method" value="1"/>-->
<!--<param name="queue_size" value="5"/>-->
<!--<param name="approximate_sync" type="bool" value="True"/>-->
<!--</node>-->
<!-- ROS navigation stack move_base -->
<!--<group ns="planner">-->
<!--<remap from="openni_points" to="/planner_cloud"/>-->
<!--<remap from="base_scan" to="/base_scan"/>-->
<!--<remap from="map" to="/map"/>-->
<!--<remap from="move_base_simple/goal" to="/planner_goal"/>-->
<!--<node pkg="move_base" type="move_base" respawn="false" name="move_base" output="screen">-->
<!--<rosparam file="/home/ubuntu/catkin_ws/src/VisBot/ConfigFiles/costmap_common_params.yaml" command="load" ns="global_costmap" />-->
<!--<rosparam file="/home/ubuntu/catkin_ws/src/VisBot/ConfigFiles/costmap_common_params.yaml" command="load" ns="local_costmap" />-->
<!--<rosparam file="/home/ubuntu/catkin_ws/src/VisBot/ConfigFiles/local_costmap_params_3d.yaml" command="load" />-->
<!--<rosparam file="/home/ubuntu/catkin_ws/src/VisBot/ConfigFiles/global_costmap_params.yaml" command="load" />-->
<!--<rosparam file="/home/ubuntu/catkin_ws/src/VisBot/ConfigFiles/base_local_planner_params.yaml" command="load" />-->
<!--</node>-->
<!--<param name="cmd_vel/abtr_priority" value="10"/>-->
<!--</group>-->
<!-- Create point cloud for the planner -->
<node pkg="nodelet" type="nodelet" name="disparity2cloud" args="load rtabmap_ros/point_cloud_xyz stereo_nodelet">
<remap from="disparity/image" to="disparity"/>
<remap from="disparity/camera_info" to="right/camera_info_throttle"/>
<remap from="cloud" to="cloudXYZ"/>
<param name="voxel_size" type="double" value="0.05"/>
<param name="decimation" type="int" value="4"/>
<param name="max_depth" type="double" value="4"/>
</node>
<node pkg="nodelet" type="nodelet" name="obstacles_detection" args="load rtabmap_ros/obstacles_detection stereo_nodelet">
<remap from="cloud" to="cloudXYZ"/>
<remap from="obstacles" to="/planner_cloud"/>
<param name="frame_id" type="string" value="base_link"/>
<param name="map_frame_id" type="string" value="map"/>
<param name="wait_for_transform" type="bool" value="true"/>
<param name="min_cluster_size" type="int" value="20"/>
<param name="max_obstacles_height" type="double" value="0.0"/>
</node>
<group ns="rtabmap">
<!-- Visual SLAM: args: "delete_db_on_start" and "udebug" -->
<node name="rtabmap" pkg="rtabmap_ros" type="rtabmap" output="screen" args="--delete_db_on_start --udebug">
<param name="frame_id" type="string" value="/base_link"/>
<param name="subscribe_stereo" type="bool" value="true"/>
<param name="subscribe_depth" type="bool" value="false"/>
<param name="wait_for_transform" type="bool" value="true"/><!--para los warnings de tf-->
<remap from="left/image_rect" to="/stereo/left/image_rect_color"/>
<remap from="right/image_rect" to="/stereo/right/image_rect_color"/>
<!--<remap from="right/image_rect" to="/stereo/right/image_rect"/>-->
<remap from="left/camera_info" to="/stereo/left/camera_info"/>
<remap from="right/camera_info" to="/stereo/right/camera_info"/>
<!--<remap from="odom" to="/odometry"/> -->
<remap from="odom" to="/odom"/>
<param name="queue_size" type="int" value="5"/>
<param name="stereo_approx_sync" type="bool" value="True"/>
<!-- RTAB-Map's parameters -->
<param name="Rtabmap/TimeThr" type="string" value="700"/>
<param name="Rtabmap/DetectionRate" type="string" value="1"/>
<param name="Kp/WordsPerImage" type="string" value="200"/>
<param name="Kp/RoiRatios" type="string" value="0.03 0.03 0.04 0.04"/>
<param name="Kp/DetectorStrategy" type="string" value="0"/> <!-- use SURF -->
<param name="Kp/NNStrategy" type="string" value="1"/> <!-- kdTree -->
<param name="SURF/HessianThreshold" type="string" value="1000"/>
<param name="LccBow/MaxDepth" type="string" value="5"/>
<param name="LccBow/MinInliers" type="string" value="10"/>
<param name="LccBow/InlierDistance" type="string" value="0.02"/>
<param name="LccReextract/Activated" type="string" value="true"/>
<param name="LccReextract/MaxWords" type="string" value="500"/>
<param name="Kp/MaxDepth" type="string" value="8.0"/>
<!-- Disable graph optimization because we use map_optimizer node below -->
<param name="RGBD/ToroIterations" type="string" value="0"/>
</node>
<!-- Optimizing outside the rtabmap node makes it possible to always optimize the global map -->
<node pkg="rtabmap_ros" type="map_optimizer" name="map_optimizer"/>
<node if="$(arg rviz)" pkg="rtabmap_ros" type="map_assembler" name="map_assembler">
<param name="occupancy_grid" type="bool" value="true"/>
<remap from="mapData" to="mapData_optimized"/>
<remap from="grid_projection_map" to="/map"/>
</node>
<!-- Visualisation RTAB-Map -->
<node if="$(arg rtabmapviz)" pkg="rtabmap_ros" type="rtabmapviz" name="rtabmapviz" args="-d $(find rtabmap_ros)/launch/config/rgbd_gui.ini" output="screen">
<param name="subscribe_stereo" type="bool" value="true"/>
<param name="subscribe_odom_info" type="bool" value="false"/><!--true solo para uso de odometria rtabmap-->
<param name="queue_size" type="int" value="5"/>
<param name="frame_id" type="string" value="/base_link"/>
<param name="wait_for_transform" type="bool" value="true"/><!--para los warnings de tf-->
<remap from="left/image_rect" to="/stereo/left/image_rect_color"/>
<remap from="right/image_rect" to="/stereo/right/image_rect_color"/>
<!--<remap from="right/image_rect" to="/stereo/right/image_rect"/>-->
<remap from="left/camera_info" to="/stereo/left/camera_info"/>
<remap from="right/camera_info" to="/stereo/right/camera_info"/>
<remap from="odom_info" to="/odom_info"/>
<!--<remap from="odom" to="/odometry"/>-->
<remap from="odom" to="/odom"/>
<remap from="mapData" to="mapData_optimized"/>
</node>
</group>
<!-- Visualisation RVIZ -->
<!--<node if="$(arg rviz)" pkg="rviz" type="rviz" name="rviz" args="-d $(find rtabmap_ros)/launch/config/demo_stereo_outdoor.rviz"/>-->
</launch>
VisBot
Tuesday, March 3, 2015
Sunday, February 22, 2015
2- VisBot Vision Module || Part 1 (Cameras)
In this first part on VisBot's vision module we will cover the first steps in the development of the robot's vision module, from the hardware used to capture the images to obtaining the captured, compressed images inside the ROS system.
First of all, the vision will be stereoscopic, provided by two "RaspberryPiCamera" modules, each of them mounted on a "RaspberryPi B+" through the MIPI CSI port.
Each of the cameras has the following notable characteristics:
Size: 25 x 20 x 9 mm
Weight: 3 g
Sensor: OmniVision OV5647
Sensor resolution: 2592 x 1944 pixels
Maximum framerate: 90 fps
The rest of the cameras' specifications can be found on this page: www.raspberrypi.org/documentation/hardware/camera.md
Both Raspberrys (the ones controlling each camera) run the same software: an operating system called "Raspbian", an adaptation of Debian prepared for the RaspberryPi; the ROS framework in its Indigo version; and, within it, a node specifically designed to control the "RaspberryPiCamera", called "raspicam_node".
The two vision sub-modules, each formed by a "RaspberryPi B+" plus a "RaspberryPiCamera", are positioned at the same height but at different horizontal positions along the same horizontal axis. Both are held firmly in place, fitted into two methacrylate bases with several openings for working with the hardware, most notably the openings that let the cameras capture images of the real world.
To install "Raspbian" on the Raspberrys we followed this tutorial: Raspbian installation.
To install the ROS Indigo framework, it was compiled from source as explained in this tutorial: Installing and compiling ROS from source on Raspbian.
Once the framework is up and running, we move on to installing from source the "raspicam" package, which contains "raspicam_node". We do this with the following steps in the command console:
1- First we need to enable the camera so that the operating system recognizes it and we can work with it (to be clear, this operation does not turn the camera on). We do this by opening the Raspberry's configuration with the command "sudo raspi-config" and navigating to the "camera" section, where we have to change its state to "enable".
2- Next we install some ROS packages that "raspicam_node" needs in order to work correctly. The command to run is the following: "sudo apt-get install ros-indigo-image-transport ros-indigo-image-transport-plugins ros-indigo-camera-info-manager"
3- We create a Catkin Workspace to work with ROS following this tutorial: Tutorial for creating a Catkin Workspace.
4- We go to the Workspace we have created and enter the "src" folder. Once inside this folder, we download into it the source code of "raspicam_node" with this instruction: "git clone https://github.com/fpasteau/raspicam_node.git raspicam"
5- We return to the root directory of the Workspace and run the ROS build command, which compiles all the modules in the Workspace. The instruction is this: "catkin_make"
Once we have completed the previous steps, "raspicam_node" is ready to be run using the ROS instruction for launching nodes. In this case the command is this: "rosrun raspicam raspicam_node".
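As a quick check that the node is up (assuming a roscore is already running), the standard ROS introspection commands can be used; depending on the node version, the image topics may only appear once capture has been started with the service described below:
rosnode list
rostopic list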
Before continuing, a few notes on the structure and operation of the node:
- It publishes on 2 topics: "/camera/compressed" and "/camera/camera_info".
On the "/camera/compressed" topic it publishes messages of type "sensor_msgs/CompressedImage", each containing a "jpeg"-encoded image plus additional information such as the time stamp included in the message's "Header". The time stamp refers to the instant at which the image was taken and will be used to synchronize the images taken by the two different cameras. The "Header" mentioned above is a predefined ROS data type; when it is used inside a message type definition, special information about the message is collected when the message is created. Not every message created in the ROS framework needs this information, but it is useful whenever a time stamp has to be transmitted inside a message. The reason "jpeg"-compressed images are used is that this avoids congesting the topics, bearing in mind, for example, that these topics may be sending their messages over a local network via Ethernet.
On the other hand, on the "/camera/camera_info" topic, messages of type "sensor_msgs/CameraInfo" are published. For each "sensor_msgs/CompressedImage" message generated, an associated "sensor_msgs/CameraInfo" is created with the same time stamp as the image it refers to. Besides the "Header", these "sensor_msgs/CameraInfo" messages also contain the camera calibration data and the actual image region captured by the camera driver. All of this information is kept so that, whenever some kind of processing has to be done on an image, the camera parameters associated with that image, as well as the capture region, are also available.
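Once capture has been started, the rate and contents of both topics can be inspected with the standard ROS tools, for example:
rostopic hz /camera/compressed
rostopic echo -n 1 /camera/camera_info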
- It offers 3 basic services:
"/camera/start_capture", whose function is to make the camera start recording and to start publishing messages on the topics mentioned above.
"/camera/stop_capture", which stops the image capture and the publication of messages on the topics.
"/set_camera_info", which is used to permanently store the camera calibration parameters inside the "raspicam_node" package. These parameters are stored within the "raspicam" package in "/calibrations/camera.yaml".
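Capture can be started and stopped from the console (this is the same call that the launch file above makes through its "rosservice" nodes), for example:
rosservice call /camera/start_capture
rosservice call /camera/stop_capture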
- It has 5 configurable parameters available:
"width", the width of the captured images. Its values are constrained to: 0 < width <= 1920
"height", the height of the captured images. Its values are constrained to: 0 < height <= 1080
"framerate", the image capture rate and therefore also the rate at which messages are published on the topics. Its values are constrained to: 0 < framerate <= 90
"quality", the quality of the captured images. Its values are constrained to: 0 < quality <= 100
"tf_prefix", the prefix for the "frame_id". The "frame_id" is another of the attributes of the "Header" data type.
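As a sketch only, and assuming the node reads these as private ROS parameters (the usual convention; the exact handling depends on the raspicam_node version), they could be set when starting the node, for example:
rosrun raspicam raspicam_node _width:=1280 _height:=720 _framerate:=30 _quality:=50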
Sunday, November 30, 2014
Tutorial -- Building of LSD-SLAM on ROS with Catkin Workspace (ROS Indigo + Ubuntu 14.04 [64-bit]) (English)
In this tutorial, we'll see how to compile the LSD-SLAM project on Ubuntu 14.04 using the ROS (Robot Operating System) framework in its "Indigo Igloo" version, and specifically using "Catkin", which is currently the official build system of ROS and the successor of the original ROS build system known as "Rosbuild".
The steps to install and compile the LSD-SLAM project are described below:
1- Once ROS has been installed and configured and we have created a Catkin Workspace, we open a terminal on Ubuntu and we navigate to the folder that we are using as the Catkin Workspace by running the following command:
cd <path_to_catkin_ws>/src
In the preceding command, <path_to_catkin_ws> should be replaced with the location of the Catkin Workspace within the Ubuntu file system.
2- Within the Workspace we clone the principal "branch" of the LSD-SLAM project with the command: git clone https://github.com/tum-vision/lsd_slam.git
3- Next, we switch to the Catkin "branch" of the LSD-SLAM project with these commands:
cd lsd_slam/
git checkout catkin
4- With the preceding commands, the code of the Catkin "branch" of LSD-SLAM should already be available in the Workspace, but we nevertheless perform a quick manual check that the project file structure in our Workspace corresponds to the Catkin "branch" of the LSD-SLAM project.
We run the command: ls
The output should be the following:
LICENSE lsd_slam lsd_slam_core lsd_slam_viewer README.md
5- At this point, all that remains is to install the dependencies the project needs in order to compile.
We must install the following libraries:
ros-indigo-libg2o ros-indigo-cv-bridge liblapack-dev libblas-dev freeglut3-dev libqglviewer-dev libsuitesparse-dev libx11-dev
The installation of these libraries is carried out with the following single command:
sudo apt-get install ros-indigo-libg2o ros-indigo-cv-bridge liblapack-dev libblas-dev freeglut3-dev libqglviewer-dev libsuitesparse-dev libx11-dev
6- At this point, we try to perform the compilation. To do so, we first change to the Catkin Workspace with the instruction: cd <path_to_catkin_ws>
Next we run the command that performs the compilation: catkin_make
If the compilation is successful, you can skip to step 11 to run the execution test of the project and see that the whole system works correctly.
7- If the compilation has failed, it is probably because some of the project's configuration files are not set up properly for compilation in ROS Indigo with a Catkin Workspace. First, we open the file "package.xml" both in the folder "lsd_slam/lsd_slam_viewer" and in the folder "lsd_slam/lsd_slam_core". Once both files are open, we check that they contain the two following instructions:
...
<build_depend>cmake_modules</build_depend>
...
<run_depend>cmake_modules</run_depend>
...
If they don't exist, we add them as follows:
The instruction <build_depend>cmake_modules</build_depend> goes right after all of the other instructions of type <build_depend>.
The instruction <run_depend>cmake_modules</run_depend> goes right after all of the other instructions of type <run_depend>.
8- We continue checking the configuration files. Now we open the file "CMakeLists.txt" both in the folder "lsd_slam/lsd_slam_viewer" and in the folder "lsd_slam/lsd_slam_core" and verify that both files contain the following instruction:
...
find_package(cmake_modules REQUIRED)
...
In case it doesn't exist, we introduce it just before the first instruction of the type "find_package".
9- We finish checking the configuration files by opening the file "CMakeLists.txt" in the folder "lsd_slam/lsd_slam_core" and verifying that the following instruction exists:
...
target_link_libraries(lsdslam ${FABMAP_LIB} ${G2O_LIBRARIES} ${catkin_LIBRARIES} csparse cxsparse X11)
...
If the preceding instruction doesn't exist, there should be a similar one without the "X11" library, so the only thing we have to do is add the "X11" library to the "target_link_libraries" instruction as shown above.
10- At this point we compile the project again as explained in step 6. The commands are the following:
cd <path_to_catkin_ws>
catkin_make
This time the compilation should succeed, since all of the configuration files are now adapted for compilation in ROS Indigo with a Catkin Workspace.
11- Now that the project has compiled successfully, we carry out a small test of the system using one of the ".bag" files that the project's creators provide for testing the LSD-SLAM system.
The ".bag" file that we will utilize can be downloaded here: LSD_room.bag.zip
Once the file "LSD_room.bag.zip" is downloaded, it will have to be decompressed.
Next, we start the nodes needed to run the test.
We open a new terminal and enter the following command to start the ROS core: roscore
We open a new terminal and enter the following command to execute the LSD-SLAM visualization node: rosrun lsd_slam_viewer viewer
We open a new terminal and enter the following command to execute the main LSD-SLAM node: rosrun lsd_slam_core live_slam image:=/image_raw camera_info:=/camera_info
At this point it only remains for us to execute the file "LSD_room.bag" with the following instruction: rosbag play <path>/LSD_room.bag
In the command above, <path> would be the location in the Ubuntu file-system where the file "LSD_room.bag" is stored.
The result would be something like this:
Final note: If the "viewer" produces an error when run, make sure you have the latest graphics card drivers and have installed all Ubuntu software updates.
If Ubuntu is virtualized with VirtualBox, make sure you have installed all Ubuntu software updates as well as the "Guest Additions". If errors still appear but do not abort the execution of the node, they can be ignored: they are minor virtualization issues that do not prevent the "viewer" from running correctly.
References:
Official page of the LSD-SLAM project: vision.in.tum.de/research/lsdslam
Official page of the LSD-SLAM project code: github.com/tum-vision/lsd_slam
Official documentation of "Catkin": catkin/conceptual_overview
Official tutorial of installation and ROS configuration: InstallingandConfiguringROSEnvironment
Official tutorial for creation of the Catkin Workspace: catkin/Tutorials/create_a_workspace