Source code for qinfer.test_models

#!/usr/bin/python
# -*- coding: utf-8 -*-
##
# test_models.py: Simple models for testing inference engines.
##
# © 2017, Chris Ferrie (csferrie@gmail.com) and
#         Christopher Granade (cgranade@cgranade.com).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     1. Redistributions of source code must retain the above copyright
#        notice, this list of conditions and the following disclaimer.
#
#     2. Redistributions in binary form must reproduce the above copyright
#        notice, this list of conditions and the following disclaimer in the
#        documentation and/or other materials provided with the distribution.
#
#     3. Neither the name of the copyright holder nor the names of its
#        contributors may be used to endorse or promote products derived from
#        this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##

## FEATURES ##################################################################

from __future__ import absolute_import
from __future__ import division # Ensures that a/b is always a float.

## EXPORTS ###################################################################

__all__ = [
    'SimpleInversionModel',
    'SimplePrecessionModel',
    'UnknownT2Model',
    'CoinModel',
    'NoisyCoinModel',
    'NDieModel'
]

## IMPORTS ###################################################################

from builtins import range

import numpy as np

from .utils import binomial_pdf, decorate_init
from .abstract_model import FiniteOutcomeModel, DifferentiableModel
from ._due import due, Doi

## CLASSES ####################################################################

class SimpleInversionModel(FiniteOutcomeModel, DifferentiableModel):
    r"""
    Describes the free evolution of a single qubit prepared in the
    :math:`\left|+\right\rangle` state under a Hamiltonian :math:`H = \omega \sigma_z / 2`,
    using the interactive QLE model proposed by [WGFC13a]_.

    :param float min_freq: Minimum value for :math:`\omega` to accept as valid.
        This is used for testing techniques that mitigate the effects of
        degenerate models; there is no "good" reason to ever set this to
        anything other than zero, except to test with an explicitly broken model.
    """
    
    ## INITIALIZER ##

    def __init__(self, min_freq=0):
        super(SimpleInversionModel, self).__init__()
        self._min_freq = min_freq

    ## PROPERTIES ##
    
    @property
    def n_modelparams(self):
        return 1
    
    @property
    def modelparam_names(self):
        return [r'\omega']
        
    @property
    def expparams_dtype(self):
        return [('t', 'float'), ('w_', 'float')]
    
    @property
    def is_n_outcomes_constant(self):
        """
        Returns ``True`` if and only if the number of outcomes for each
        experiment is independent of the experiment being performed.
        
        This property is assumed by inference engines to be constant for
        the lifetime of a Model instance.
        """
        return True
    
    ## METHODS ##
    
    def are_models_valid(self, modelparams):
        return np.all(modelparams > self._min_freq, axis=1)
    
    def n_outcomes(self, expparams):
        """
        Returns an array of dtype ``uint`` describing the number of outcomes
        for each experiment specified by ``expparams``.
        
        :param numpy.ndarray expparams: Array of experimental parameters. This
            array must be of dtype agreeing with the ``expparams_dtype``
            property.
        """
        return 2
    
    def likelihood(self, outcomes, modelparams, expparams):
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(SimpleInversionModel, self).likelihood(
            outcomes, modelparams, expparams
        )

        # Possibly add a second axis to modelparams.
        if len(modelparams.shape) == 1:
            modelparams = modelparams[..., np.newaxis]
            
        t = expparams['t']
        dw = modelparams - expparams['w_']
        
        # Allocating first serves to make sure that a shape mismatch later
        # will cause an error.
        pr0 = np.zeros((modelparams.shape[0], expparams.shape[0]))
        pr0[:, :] = np.cos(t * dw / 2) ** 2

        # Now we concatenate over outcomes.
        return FiniteOutcomeModel.pr0_to_likelihood_array(outcomes, pr0)

    def score(self, outcomes, modelparams, expparams, return_L=False):
        if len(modelparams.shape) == 1:
            modelparams = modelparams[:, np.newaxis]

        t = expparams['t']
        dw = modelparams - expparams['w_']

        outcomes = outcomes.reshape((outcomes.shape[0], 1, 1))

        arg = dw * t / 2
        q = (
            np.power( t / np.tan(arg), outcomes) *
            np.power(-t * np.tan(arg), 1 - outcomes)
        )[np.newaxis, ...]

        assert q.ndim == 4


        if return_L:
            return q, self.likelihood(outcomes, modelparams, expparams)
        else:
            return q
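
# A minimal usage sketch, not part of the original module: the likelihood can
# be evaluated directly against a record array of experiment parameters. The
# particular values of omega, t and w_ below are illustrative only.
#
#     >>> model = SimpleInversionModel()
#     >>> modelparams = np.array([[0.4], [0.6]])       # two hypotheses for omega
#     >>> expparams = np.array([(10.0, 0.5)], dtype=model.expparams_dtype)
#     >>> L = model.likelihood(np.array([0, 1]), modelparams, expparams)
#     >>> L.shape                                      # (outcomes, models, experiments)
#     (2, 2, 1)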


class SimplePrecessionModel(SimpleInversionModel):
    r"""
    Describes the free evolution of a single qubit prepared in the
    :math:`\left|+\right\rangle` state under a Hamiltonian :math:`H = \omega \sigma_z / 2`,
    as explored in [GFWC12]_.

    :param float min_freq: Minimum value for :math:`\omega` to accept as valid.
        This is used for testing techniques that mitigate the effects of
        degenerate models; there is no "good" reason to ever set this to be
        less than zero, other than to test with an explicitly broken model.

    :modelparam omega: The precession frequency :math:`\omega`.
    :scalar-expparam float: The evolution time :math:`t`.
    """

    ## PROPERTIES ##

    @property
    def expparams_dtype(self):
        return 'float'

    ## METHODS ##

    def likelihood(self, outcomes, modelparams, expparams):
        # Pass the expparams to the superclass as a record array.
        new_eps = np.empty(expparams.shape, dtype=super(SimplePrecessionModel, self).expparams_dtype)
        new_eps['w_'] = 0
        try:
            new_eps['t'] = expparams
        except ValueError:
            new_eps['t'] = expparams['t']

        return super(SimplePrecessionModel, self).likelihood(outcomes, modelparams, new_eps)

    def score(self, outcomes, modelparams, expparams, return_L=False):
        # Pass the expparams to the superclass as a record array.
        new_eps = np.empty(expparams.shape, dtype=super(SimplePrecessionModel, self).expparams_dtype)
        new_eps['w_'] = 0
        try:
            new_eps['t'] = expparams
        except ValueError:
            new_eps['t'] = expparams['t']

        q = super(SimplePrecessionModel, self).score(outcomes, modelparams, new_eps, return_L=False)

        if return_L:
            return q, self.likelihood(outcomes, modelparams, expparams)
        else:
            return q
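
# A minimal inference sketch, not part of the original module, assuming the
# rest of QInfer (SMCUpdater, UniformDistribution) is importable from the
# package root. The particle count, prior range, and evolution time are
# illustrative choices only.
#
#     >>> from qinfer import SMCUpdater, UniformDistribution
#     >>> model = SimplePrecessionModel()
#     >>> prior = UniformDistribution(np.array([[0, 1]]))
#     >>> updater = SMCUpdater(model, 2000, prior)
#     >>> true_omega = np.array([[0.65]])
#     >>> expparams = np.array([12.0], dtype=model.expparams_dtype)
#     >>> outcome = model.simulate_experiment(true_omega, expparams)
#     >>> updater.update(outcome, expparams)
#     >>> updater.est_mean()      # posterior mean estimate of omega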

@decorate_init(
    due.dcite(
        Doi('10.1088/1367-2630/14/10/103013'),
        description='Robust online Hamiltonian learning',
        tags=['implementation']
    )
)
class UnknownT2Model(FiniteOutcomeModel):
    r"""
    Describes the free evolution of a single qubit prepared in the
    :math:`\left|+\right\rangle` state under a Hamiltonian
    :math:`H = \omega \sigma_z / 2` with an unknown :math:`T_2` process,
    as explored in [GFWC12]_.

    :modelparam omega: The precession frequency :math:`\omega`.
    :modelparam T2_inv: The decoherence strength :math:`T_2^{-1}`.
    :scalar-expparam float: The evolution time :math:`t`.
    """

    ## PROPERTIES ##

    @property
    def n_modelparams(self):
        return 2

    @property
    def modelparam_names(self):
        return [r'\omega', r'T_2^{-1}']

    @property
    def expparams_dtype(self):
        return [('t', 'float')]

    ## METHODS ##

    def n_outcomes(self, expparams):
        return 2

    def are_models_valid(self, modelparams):
        return np.all(modelparams >= 0, axis=1)

    def likelihood(self, outcomes, modelparams, expparams):
        w, T2_inv = modelparams.T[:, :, None]
        t = expparams['t']

        visibility = np.exp(-t * T2_inv)

        pr0 = np.empty((w.shape[0], t.shape[0]))
        pr0[:, :] = visibility * np.cos(w * t / 2) ** 2 + (1 - visibility) / 2

        return FiniteOutcomeModel.pr0_to_likelihood_array(outcomes, pr0)
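
# A minimal usage sketch, not part of the original module: with a nonzero
# T2_inv the fringe visibility decays and Pr(0) is pulled toward 1/2. The
# parameter values below are illustrative only.
#
#     >>> model = UnknownT2Model()
#     >>> modelparams = np.array([[0.5, 0.0], [0.5, 0.1]])   # columns: omega, T2_inv
#     >>> expparams = np.array([(30.0,)], dtype=model.expparams_dtype)
#     >>> L = model.likelihood(np.array([0]), modelparams, expparams)
#     >>> L[0, :, 0]    # the second entry lies closer to 0.5 than the first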

class CoinModel(FiniteOutcomeModel, DifferentiableModel):
    r"""
    Arguably the simplest possible model; the unknown model parameter is the
    bias of a coin, and an experiment consists of flipping it and looking at
    the result. The model parameter :math:`p` represents the probability of
    outcome 0.
    """

    ## INITIALIZER ##

    def __init__(self):
        super(CoinModel, self).__init__()

    ## PROPERTIES ##

    @property
    def n_modelparams(self):
        return 1

    @property
    def modelparam_names(self):
        return [r'p']

    @property
    def expparams_dtype(self):
        return []

    @property
    def is_n_outcomes_constant(self):
        """
        Returns ``True`` if and only if the number of outcomes for each
        experiment is independent of the experiment being performed.

        This property is assumed by inference engines to be constant for
        the lifetime of a FiniteOutcomeModel instance.
        """
        return True

    ## METHODS ##

    @staticmethod
    def are_models_valid(modelparams):
        return np.logical_and(modelparams >= 0, modelparams <= 1).all(axis=1)

    def n_outcomes(self, expparams):
        """
        Returns an array of dtype ``uint`` describing the number of outcomes
        for each experiment specified by ``expparams``.

        :param numpy.ndarray expparams: Array of experimental parameters. This
            array must be of dtype agreeing with the ``expparams_dtype``
            property.
        """
        return 2

    def likelihood(self, outcomes, modelparams, expparams):
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(CoinModel, self).likelihood(outcomes, modelparams, expparams)

        # Our job is easy.
        pr0 = np.tile(modelparams.flatten(), (expparams.shape[0], 1)).T

        # Now we concatenate over outcomes.
        return FiniteOutcomeModel.pr0_to_likelihood_array(outcomes, pr0)

    def score(self, outcomes, modelparams, expparams, return_L=False):
        p = modelparams.flatten()[np.newaxis, :]
        side = outcomes.flatten()[:, np.newaxis]

        q = (1 - side) / p - side / (1 - p)

        # We need to add a singleton dimension since there
        # is only one model parameter.
        q = q[np.newaxis, :, :]

        # Duplicate this for any expparams we have.
        q = np.tile(q, (expparams.shape[0], 1, 1, 1)).transpose((1, 2, 3, 0))

        if return_L:
            return q, self.likelihood(outcomes, modelparams, expparams)
        else:
            return q
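
# A short note, not part of the original module, on the score computed above:
# the outcome-0 likelihood is p and the outcome-1 likelihood is 1 - p, so
#
#     d/dp log Pr(0 | p) =  1 / p,
#     d/dp log Pr(1 | p) = -1 / (1 - p),
#
# which is exactly `(1 - side) / p - side / (1 - p)` with `side` equal to the
# observed outcome.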

class NoisyCoinModel(FiniteOutcomeModel):
    r"""
    Implements the "noisy coin" model of [FB12]_, where the model parameter
    :math:`p` is the probability of the noisy coin. This model has two
    experiment parameters, :math:`\alpha` and :math:`\beta`, which are the
    probabilities of observing a "0" outcome conditioned on the "true"
    outcome being 0 and 1, respectively. That is, for an ideal coin,
    :math:`\alpha = 1` and :math:`\beta = 0`.

    Note that :math:`\alpha` and :math:`\beta` are implemented as experiment
    parameters not because we expect to design over those values, but because
    a specification of each is necessary to honestly describe an experiment
    that was performed.

    :modelparam p: "Heads" probability :math:`p`.
    :expparam float alpha: Visibility parameter :math:`\alpha`.
    :expparam float beta: Visibility parameter :math:`\beta`.
    """

    ## INITIALIZER ##

    def __init__(self):
        super(NoisyCoinModel, self).__init__()

    ## PROPERTIES ##

    @property
    def n_modelparams(self):
        return 1

    @property
    def expparams_dtype(self):
        return [('alpha', 'float'), ('beta', 'float')]

    @property
    def is_n_outcomes_constant(self):
        return True

    ## METHODS ##

    @staticmethod
    def are_models_valid(modelparams):
        return np.logical_and(modelparams >= 0, modelparams <= 1).all(axis=1)

    def n_outcomes(self, expparams):
        return 2

    def likelihood(self, outcomes, modelparams, expparams):
        # Unpack alpha and beta.
        a = expparams['alpha']
        b = expparams['beta']

        # Find the probability of getting a "0" outcome.
        pr0 = modelparams * a + (1 - modelparams) * b

        # Concatenate over outcomes.
        return FiniteOutcomeModel.pr0_to_likelihood_array(outcomes, pr0)
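
# A minimal usage sketch, not part of the original module: alpha and beta
# enter through the experiment record, so the same bias hypotheses can be
# scored against different measurement fidelities. The values below are
# illustrative only.
#
#     >>> model = NoisyCoinModel()
#     >>> modelparams = np.array([[0.2], [0.8]])
#     >>> expparams = np.array([(0.9, 0.1)], dtype=model.expparams_dtype)
#     >>> L = model.likelihood(np.array([0]), modelparams, expparams)
#     >>> L[0, :, 0]    # -> approximately [0.26, 0.74], i.e. 0.9 p + 0.1 (1 - p)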

class NDieModel(FiniteOutcomeModel):
    r"""
    Implements a model of rolling a die with n sides, whose unknown model
    parameters are the weights of each side; a generalization of CoinModel.
    An experiment consists of rolling the die once. The faces of the die are
    zero indexed, labeled 0, 1, 2, ..., n-1.

    :param int n: Number of sides on the die.
    :param float threshold: How close to 1 the sum of the side probabilities
        must be for a model to be considered valid.
    """

    ## INITIALIZERS ##

    def __init__(self, n=6, threshold=1e-7):
        # We need to set this private property before
        # calling super, which relies on n_modelparams.
        self._n = n
        super(NDieModel, self).__init__()
        self._threshold = threshold

    ## PROPERTIES ##

    @property
    def n_modelparams(self):
        return self._n

    @property
    def expparams_dtype(self):
        # This is a dummy parameter; its value doesn't enter
        # into the likelihood.
        return [('exp_num', 'int')]

    @property
    def is_n_outcomes_constant(self):
        """
        Returns ``True`` if and only if the number of outcomes for each
        experiment is independent of the experiment being performed.

        This property is assumed by inference engines to be constant for
        the lifetime of a Model instance.
        """
        return True

    ## METHODS ##

    def are_models_valid(self, modelparams):
        sums = np.abs(np.sum(modelparams, axis=1) - 1) <= self._threshold
        bounds = np.logical_and(modelparams >= 0, modelparams <= 1).all(axis=1)
        return np.logical_and(sums, bounds)

    def n_outcomes(self, expparams):
        """
        Returns an array of dtype ``uint`` describing the number of outcomes
        for each experiment specified by ``expparams``.

        :param numpy.ndarray expparams: Array of experimental parameters. This
            array must be of dtype agreeing with the ``expparams_dtype``
            property.
        """
        return self._n

    def likelihood(self, outcomes, modelparams, expparams):
        # By calling the superclass implementation, we can consolidate
        # call counting there.
        super(NDieModel, self).likelihood(outcomes, modelparams, expparams)

        # Like for CoinModel, the modelparams _are_ the likelihoods;
        # we just need to do some tedious reshaping and tiling.
        L = np.concatenate([
            np.array([modelparams[idx][outcomes]])
            for idx in range(modelparams.shape[0])
        ])
        return np.tile(L[np.newaxis, ...], (expparams.shape[0], 1, 1)).transpose((2, 1, 0))
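
# A minimal usage sketch, not part of the original module: since the model
# parameters are the face weights themselves, the likelihood of outcome k is
# just the k-th weight. The weights below are illustrative only.
#
#     >>> model = NDieModel(n=3)
#     >>> modelparams = np.array([[0.2, 0.3, 0.5]])
#     >>> model.are_models_valid(modelparams)      # -> array([ True])
#     >>> expparams = np.array([(0,)], dtype=model.expparams_dtype)
#     >>> L = model.likelihood(np.array([2]), modelparams, expparams)
#     >>> L[0, 0, 0]                               # -> 0.5, the weight of face 2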