Implementation of sequential Monte Carlo method (particle filters)

shn · May 3, 2012 · Viewed 18.7k times

I'm interested in the simple algorithm for particle filters given here: http://www.aiqus.com/upfiles/PFAlgo.png It seems very simple, but I have no idea how to do it practically. Any idea how to implement it (just so I can better understand how it works)?

Edit: This is a great simple example that explains how it works: http://www.aiqus.com/questions/39942/very-simple-particle-filters-algorithm-sequential-monte-carlo-method-implementation?page=1#39950

I've tried to implement it in C++: http://pastebin.com/M1q1HcN4 but I'm not sure I'm doing it the right way. Can you please check whether I've understood it correctly, or whether there are misunderstandings in my code?

#include <iostream>
#include <vector>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_01.hpp>

using namespace std;
using namespace boost;

double uniform_generator(void);

#define N 4 // number of particles
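// N is kept tiny here so each step can be traced by hand; real filters
// typically use hundreds or thousands of particles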

#define evolutionProba_A_A (1.0/3.0) // P(X_t = A | X_t-1 = A)
#define evolutionProba_A_B (1.0/3.0) // P(X_t = A | X_t-1 = B)
#define evolutionProba_B_B (2.0/3.0) // P(X_t = B | X_t-1 = B)
#define evolutionProba_B_A (2.0/3.0) // P(X_t = B | X_t-1 = A)

#define observationProba_A_A (4.0/5.0) // P(Y_t = A | X_t = A)
#define observationProba_A_B (1.0/5.0) // P(Y_t = A | X_t = B)
#define observationProba_B_B (4.0/5.0) // P(Y_t = B | X_t = B)
#define observationProba_B_A (1.0/5.0) // P(Y_t = B | X_t = A)

/// ===========================================================================

struct Distribution { float PA; float PB; };

struct Particle
{
    Distribution distribution; // e.g. <0.5, 0.5>
    char state; // e.g. 'A' or 'B'
    float weight; // e.g. 0.8
};

/// ===========================================================================

int main()
{
    vector<char> Y; // data observations
    Y.push_back('A'); Y.push_back('B'); Y.push_back('A'); Y.push_back('A'); Y.push_back('A'); Y.push_back('B');
    Y.push_back('A'); Y.push_back('A'); Y.push_back('B'); Y.push_back('A'); Y.push_back('B'); Y.push_back('A');
    Y.push_back('A'); Y.push_back('B'); Y.push_back('B'); Y.push_back('A'); Y.push_back('A'); Y.push_back('B');

    vector< vector<Particle> > Xall; // vector of all particles from time 0 to t

    /// Step (1) Initialisation
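    // draw each particle's initial state from the uniform prior
    // P(X_0 = A) = P(X_0 = B) = 0.5 by inverting the CDF of a U(0,1) draw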
    vector<Particle> X; // a vector of N particles
    for(int i = 0; i < N; ++i)
    {
        Particle x;

        // sample particle Xi from initial distribution
        x.distribution.PA = 0.5; x.distribution.PB = 0.5;
        float r = uniform_generator();
        if( r <= x.distribution.PA ) x.state = 'A'; // r <= 0.5
        else x.state = 'B'; // 0.5 < r <= 1; else-branch also guards against PA+PB < 1 from float rounding

        X.push_back(x);
    }

    Xall.push_back(X);
    X.clear();

    /// Observing data
    for(size_t t = 1; t <= Y.size(); ++t) // one filtering step per observation
    {
        char y = Y[t-1]; // current observation

        /// Step (2) Importance sampling
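        // for each particle: (a) push its previous state distribution through
        // the transition model, (b) draw a new state from the result,
        // (c) weight the particle by the observation likelihood P(y | state)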
        float sumWeights = 0;
        vector<Particle> X; // a vector of N particles
        for(int i = 0; i < N; ++i)
        {
            Particle x;

            // P(X^i_t = A) = P(X^i_t = A | X^i_t-1 = A) * P(X^i_t-1 = A) + P(X^i_t = A | X^i_t-1 = B) * P(X^i_t-1 = B)
            x.distribution.PA = evolutionProba_A_A * Xall[t-1][i].distribution.PA + evolutionProba_A_B * Xall[t-1][i].distribution.PB;

            // P(X^i_t = B) = P(X^i_t = B | X^i_t-1 = A) * P(X^i_t-1 = A) + P(X^i_t = B | X^i_t-1 = B) * P(X^i_t-1 = B)
            x.distribution.PB = evolutionProba_B_A * Xall[t-1][i].distribution.PA + evolutionProba_B_B * Xall[t-1][i].distribution.PB;

            // sample a particle state from this distribution
            float r = uniform_generator();
            if( r <= x.distribution.PA ) x.state = 'A';
            else x.state = 'B';

            // compute weight of this particle according to the observation y
            if( y == 'A' )
            {
                if( x.state == 'A' ) x.weight = observationProba_A_A; // P(y = A | X^i_t = A)
                else if( x.state == 'B' ) x.weight = observationProba_A_B; // P(y = A | X^i_t = B)
            }
            else if( y == 'B' )
            {
                if( x.state == 'A' ) x.weight = observationProba_B_A; // P(y = B | X^i_t = A)
                else if( x.state == 'B' ) x.weight = observationProba_B_B; // P(y = B | X^i_t = B)
            }

            sumWeights += x.weight;

            X.push_back(x);
        }

        // normalise weights
        for(int i = 0; i < N; ++i)
            X[i].weight /= sumWeights;

        /// Step (3) resampling N particles according to weights
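        // with only two states, drawing each resampled state from the
        // aggregated weights (PA, PB) is equivalent to multinomial resampling,
        // i.e. picking particle i with probability proportional to weight_i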
        float PA = 0, PB = 0;
        for(int i = 0; i < N; ++i)
        {
            if( X[i].state == 'A' ) PA += X[i].weight;
            else if( X[i].state == 'B' ) PB += X[i].weight;
        }

        // print the filtered estimate P(X_t | y_1..t)
        cout << "t=" << t << "  P(X=A)=" << PA << "  P(X=B)=" << PB << endl;

        vector<Particle> reX; // new vector of particles
        for(int i = 0; i < N; ++i)
        {
            Particle x;

            x.distribution.PA = PA;
            x.distribution.PB = PB;

            float r = uniform_generator();
            if( r <= x.distribution.PA ) x.state = 'A';
            else x.state = 'B';

            x.weight = 1.0f / N; // reset to uniform weights after resampling

            reX.push_back(x);
        }

        Xall.push_back(reX);
    }

    return 0;
}

/// ===========================================================================

double uniform_generator(void)
{
    // both static, so the same pseudo-random stream continues across calls
    // (uniform_01 keeps its own copy of the engine, seeded once with 55)
    static mt19937 gen(55);
    static uniform_01< mt19937, double > uniform_gen(gen);
    return uniform_gen();
}
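
For comparison, here is a rough sketch of a generic weight-proportional (multinomial) resampling step that does not rely on the state space being binary. It reuses the Particle struct and uniform_generator() from above and assumes the weights have already been normalised:

/// generic multinomial resampling: draw N particles with probability
/// proportional to their (normalised) weights
vector<Particle> resample(const vector<Particle>& X)
{
    // cumulative distribution over the particle weights
    vector<double> cdf(X.size());
    double acc = 0;
    for(size_t i = 0; i < X.size(); ++i)
    {
        acc += X[i].weight;
        cdf[i] = acc;
    }

    vector<Particle> reX;
    for(size_t i = 0; i < X.size(); ++i)
    {
        // invert the CDF at a uniform draw (scaled by acc to guard
        // against the weights not summing to exactly 1)
        double r = uniform_generator() * acc;
        size_t j = 0;
        while( j + 1 < X.size() && cdf[j] < r ) ++j;

        Particle x = X[j];
        x.weight = 1.0f / X.size(); // weights become uniform after resampling
        reX.push_back(x);
    }
    return reX;
}

Each draw costs O(N) with the linear scan, so this resampling is O(N^2) overall; a binary search over the CDF or systematic resampling is the usual optimisation for large N.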

Answer

penelope · May 15, 2012

This online course is easy and straightforward to follow, and to me it explained particle filters really well.

It's called "Programming a Robotic Car", and it talks about three methods of localization: Monte Carlo localization, Kalman filters, and particle filters.

The course is completely free (it's finished now, so you can't actively participate, but you can still watch the lectures) and is taught by a Stanford professor. The "classes" were surprisingly easy for me to follow, and they are accompanied by little exercises: some purely logical, but a good deal of them programming. There are also homework assignments you can play with.

It actually makes you write your own code in Python for all the filters, which they also test for you. By the end of the course, you should have all three filters implemented in Python.

I warmly recommend checking it out.