Generated by Cython 3.0.12

In the HTML report, yellow lines hint at Python interaction. In this rendering, a line that starts with a "+" is followed by the C code that Cython generated for it.

Raw output: partial_derivative.c

+01: # To compile:
  __pyx_t_7 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_7) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
 02: # python setup.py build_ext --inplace --force
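      The build command above assumes a setup.py next to the .pyx file; none is shown in this report,
      so the following is only a sketch. The OpenMP flags and the NPY_NO_DEPRECATED_API macro are
      assumptions: the flags are what GCC/Clang need for the prange loop below, and the macro silences
      the "Using deprecated NumPy API" warning that numpy.pxd otherwise triggers.

      # setup.py -- hedged sketch, not taken from the original project
      from setuptools import setup, Extension
      from Cython.Build import cythonize
      import numpy as np

      ext = Extension(
          "partial_derivative",
          sources=["partial_derivative.pyx"],
          include_dirs=[np.get_include()],      # required by "cimport numpy as np"
          define_macros=[("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION")],
          extra_compile_args=["-fopenmp"],      # assumption: GCC/Clang OpenMP flag for prange
          extra_link_args=["-fopenmp"],
      )

      setup(ext_modules=cythonize([ext], compiler_directives={"language_level": "3"}))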
 03: 
 04: # "cimport" is used to import special compile-time information
 05: # about the numpy module (this is stored in a file numpy.pxd which is
 06: # currently part of the Cython distribution).
 07: 
 08: cimport cython
 09: cimport numpy as np
 10: 
+11: import numpy as np
  __pyx_t_7 = __Pyx_ImportDottedModule(__pyx_n_s_numpy, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 11, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_7) < 0) __PYX_ERR(1, 11, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
 12: from cython.parallel import prange
 13: 
 14: ctypedef np.float64_t DTYPE_t
 15: 
+16: @cython.cdivision(True)
/* Python wrapper */
static PyObject *__pyx_pw_18partial_derivative_1partial_derivative(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
); /*proto*/
PyDoc_STRVAR(__pyx_doc_18partial_derivative_partial_derivative, "\n    Computes partial derivatives for the error propagation\n    Parameters\n    ============\n\n    inv_delta_wave : np.ndarray\n        Precomputed values of 1/(x[i+1] - x[i])\n    n_threads : int\n        Number of threads to use \n    ");
static PyMethodDef __pyx_mdef_18partial_derivative_1partial_derivative = {"partial_derivative", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_18partial_derivative_1partial_derivative, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_18partial_derivative_partial_derivative};
static PyObject *__pyx_pw_18partial_derivative_1partial_derivative(PyObject *__pyx_self, 
#if CYTHON_METH_FASTCALL
PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds
#else
PyObject *__pyx_args, PyObject *__pyx_kwds
#endif
) {
  __Pyx_memviewslice __pyx_v_inv_h_mat = { 0, 0, { 0 }, { 0 }, { 0 } };
  __Pyx_memviewslice __pyx_v_inv_delta_wave = { 0, 0, { 0 }, { 0 }, { 0 } };
  int __pyx_v_index_to_calc;
  __Pyx_memviewslice __pyx_v_output = { 0, 0, { 0 }, { 0 }, { 0 } };
  CYTHON_UNUSED int __pyx_v_n_threads;
  #if !CYTHON_METH_FASTCALL
  CYTHON_UNUSED Py_ssize_t __pyx_nargs;
  #endif
  CYTHON_UNUSED PyObject *const *__pyx_kwvalues;
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("partial_derivative (wrapper)", 0);
  #if !CYTHON_METH_FASTCALL
  #if CYTHON_ASSUME_SAFE_MACROS
  __pyx_nargs = PyTuple_GET_SIZE(__pyx_args);
  #else
  __pyx_nargs = PyTuple_Size(__pyx_args); if (unlikely(__pyx_nargs < 0)) return NULL;
  #endif
  #endif
  __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs);
  {
    PyObject **__pyx_pyargnames[] = {&__pyx_n_s_inv_h_mat,&__pyx_n_s_inv_delta_wave,&__pyx_n_s_index_to_calc,&__pyx_n_s_output,&__pyx_n_s_n_threads,0};
  PyObject* values[5] = {0,0,0,0,0};
    if (__pyx_kwds) {
      Py_ssize_t kw_args;
      switch (__pyx_nargs) {
        case  5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4);
        CYTHON_FALLTHROUGH;
        case  4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3);
        CYTHON_FALLTHROUGH;
        case  3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2);
        CYTHON_FALLTHROUGH;
        case  2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1);
        CYTHON_FALLTHROUGH;
        case  1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
        CYTHON_FALLTHROUGH;
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds);
      switch (__pyx_nargs) {
        case  0:
        if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_inv_h_mat)) != 0)) {
          (void)__Pyx_Arg_NewRef_FASTCALL(values[0]);
          kw_args--;
        }
        else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 16, __pyx_L3_error)
        else goto __pyx_L5_argtuple_error;
        CYTHON_FALLTHROUGH;
        case  1:
        if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_inv_delta_wave)) != 0)) {
          (void)__Pyx_Arg_NewRef_FASTCALL(values[1]);
          kw_args--;
        }
        else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 16, __pyx_L3_error)
        else {
          __Pyx_RaiseArgtupleInvalid("partial_derivative", 1, 5, 5, 1); __PYX_ERR(1, 16, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  2:
        if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_index_to_calc)) != 0)) {
          (void)__Pyx_Arg_NewRef_FASTCALL(values[2]);
          kw_args--;
        }
        else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 16, __pyx_L3_error)
        else {
          __Pyx_RaiseArgtupleInvalid("partial_derivative", 1, 5, 5, 2); __PYX_ERR(1, 16, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  3:
        if (likely((values[3] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_output)) != 0)) {
          (void)__Pyx_Arg_NewRef_FASTCALL(values[3]);
          kw_args--;
        }
        else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 16, __pyx_L3_error)
        else {
          __Pyx_RaiseArgtupleInvalid("partial_derivative", 1, 5, 5, 3); __PYX_ERR(1, 16, __pyx_L3_error)
        }
        CYTHON_FALLTHROUGH;
        case  4:
        if (likely((values[4] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_n_threads)) != 0)) {
          (void)__Pyx_Arg_NewRef_FASTCALL(values[4]);
          kw_args--;
        }
        else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 16, __pyx_L3_error)
        else {
          __Pyx_RaiseArgtupleInvalid("partial_derivative", 1, 5, 5, 4); __PYX_ERR(1, 16, __pyx_L3_error)
        }
      }
      if (unlikely(kw_args > 0)) {
        const Py_ssize_t kwd_pos_args = __pyx_nargs;
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "partial_derivative") < 0)) __PYX_ERR(1, 16, __pyx_L3_error)
      }
    } else if (unlikely(__pyx_nargs != 5)) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0);
      values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1);
      values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2);
      values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3);
      values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4);
    }
    __pyx_v_inv_h_mat = __Pyx_PyObject_to_MemoryviewSlice_dsds_nn___pyx_t_18partial_derivative_DTYPE_t__const__(values[0], 0); if (unlikely(!__pyx_v_inv_h_mat.memview)) __PYX_ERR(1, 19, __pyx_L3_error)
    __pyx_v_inv_delta_wave = __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_18partial_derivative_DTYPE_t__const__(values[1], 0); if (unlikely(!__pyx_v_inv_delta_wave.memview)) __PYX_ERR(1, 19, __pyx_L3_error)
    __pyx_v_index_to_calc = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_index_to_calc == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 19, __pyx_L3_error)
    __pyx_v_output = __Pyx_PyObject_to_MemoryviewSlice_ds_nn___pyx_t_18partial_derivative_DTYPE_t(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_output.memview)) __PYX_ERR(1, 19, __pyx_L3_error)
    __pyx_v_n_threads = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_n_threads == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 19, __pyx_L3_error)
  }
  goto __pyx_L6_skip;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("partial_derivative", 1, 5, 5, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error)
  __pyx_L6_skip:;
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L3_error:;
  {
    Py_ssize_t __pyx_temp;
    for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
      __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]);
    }
  }
  __PYX_XCLEAR_MEMVIEW(&__pyx_v_inv_h_mat, 1);
  __PYX_XCLEAR_MEMVIEW(&__pyx_v_inv_delta_wave, 1);
  __PYX_XCLEAR_MEMVIEW(&__pyx_v_output, 1);
  __Pyx_AddTraceback("partial_derivative.partial_derivative", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return NULL;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_pf_18partial_derivative_partial_derivative(__pyx_self, __pyx_v_inv_h_mat, __pyx_v_inv_delta_wave, __pyx_v_index_to_calc, __pyx_v_output, __pyx_v_n_threads);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* function exit code */
  __PYX_XCLEAR_MEMVIEW(&__pyx_v_inv_h_mat, 1);
  __PYX_XCLEAR_MEMVIEW(&__pyx_v_inv_delta_wave, 1);
  __PYX_XCLEAR_MEMVIEW(&__pyx_v_output, 1);
  {
    Py_ssize_t __pyx_temp;
    for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) {
      __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]);
    }
  }
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}

static PyObject *__pyx_pf_18partial_derivative_partial_derivative(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_inv_h_mat, __Pyx_memviewslice __pyx_v_inv_delta_wave, int __pyx_v_index_to_calc, __Pyx_memviewslice __pyx_v_output, CYTHON_UNUSED int __pyx_v_n_threads) {
  Py_ssize_t __pyx_v_x_max;
  __pyx_t_18partial_derivative_DTYPE_t __pyx_v_k_entry;
  Py_ssize_t __pyx_v_mat_ind;
  Py_ssize_t __pyx_v_k;
  Py_ssize_t __pyx_v_index_i;
  PyObject *__pyx_r = NULL;
/* … */
  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* … */
  __pyx_tuple__22 = PyTuple_Pack(10, __pyx_n_s_inv_h_mat, __pyx_n_s_inv_delta_wave, __pyx_n_s_index_to_calc, __pyx_n_s_output, __pyx_n_s_n_threads, __pyx_n_s_x_max, __pyx_n_s_k_entry, __pyx_n_s_mat_ind, __pyx_n_s_k, __pyx_n_s_index_i); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_tuple__22);
  __Pyx_GIVEREF(__pyx_tuple__22);
/* … */
  __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_18partial_derivative_1partial_derivative, 0, __pyx_n_s_partial_derivative, NULL, __pyx_n_s_partial_derivative, __pyx_d, ((PyObject *)__pyx_codeobj__23)); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_7);
  if (PyDict_SetItem(__pyx_d, __pyx_n_s_partial_derivative, __pyx_t_7) < 0) __PYX_ERR(1, 16, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
 17: @cython.boundscheck(False)
 18: @cython.wraparound(False)
 19: def partial_derivative(const DTYPE_t[:,:] inv_h_mat, const DTYPE_t[:] inv_delta_wave, const int index_to_calc, DTYPE_t[:] output, const int n_threads):
 20:     """
 21:     Computes partial derivatives for the error propagation
 22:     Parameters
 23:     ============
 24: 
 25:     inv_delta_wave : np.ndarray
 26:         Precomputed values of 1/(x[i+1] - x[i])
 27:     n_threads : int
 28:         Number of threads to use 
 29:     """
 30: 
+31:     cdef Py_ssize_t x_max = output.shape[0]
  __pyx_v_x_max = (__pyx_v_output.shape[0]);
 32: 
+33:     cdef DTYPE_t k_entry = 0
  __pyx_v_k_entry = 0.0;
 34: 
+35:     cdef Py_ssize_t mat_ind = 0
  __pyx_v_mat_ind = 0;
+36:     cdef Py_ssize_t k = 0
  __pyx_v_k = 0;
 37: 
+38:     cdef Py_ssize_t index_i = index_to_calc
  __pyx_v_index_i = __pyx_v_index_to_calc;
+39:     for k in prange(1, x_max-1, nogil=True, num_threads=n_threads):
  {
      #ifdef WITH_THREAD
      PyThreadState *_save;
      _save = NULL;
      Py_UNBLOCK_THREADS
      __Pyx_FastGIL_Remember();
      #endif
      /*try:*/ {
        __pyx_t_1 = (__pyx_v_x_max - 1);
        {
            #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
                #undef likely
                #undef unlikely
                #define likely(x)   (x)
                #define unlikely(x) (x)
            #endif
            __pyx_t_3 = (__pyx_t_1 - 1 + 1 - 1/abs(1)) / 1;
            if (__pyx_t_3 > 0)
            {
                #ifdef _OPENMP
                #pragma omp parallel
                #endif /* _OPENMP */
                {
                    #ifdef _OPENMP
                    #pragma omp for firstprivate(__pyx_v_k) lastprivate(__pyx_v_k) lastprivate(__pyx_v_k_entry) lastprivate(__pyx_v_mat_ind) num_threads(__pyx_v_n_threads)
                    #endif /* _OPENMP */
                    for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){
                        {
                            __pyx_v_k = (Py_ssize_t)(1 + 1 * __pyx_t_2);
                            /* Initialize private variables to invalid values */
                            __pyx_v_k_entry = ((__pyx_t_18partial_derivative_DTYPE_t)__PYX_NAN());
                            __pyx_v_mat_ind = ((Py_ssize_t)0xbad0bad0);
/* … */
      /*finally:*/ {
        /*normal exit:*/{
          #ifdef WITH_THREAD
          __Pyx_FastGIL_Forget();
          Py_BLOCK_THREADS
          #endif
          goto __pyx_L5;
        }
        __pyx_L5:;
      }
  }
+40:         k_entry = 0
                            __pyx_v_k_entry = 0.0;
+41:         mat_ind = k-1
                            __pyx_v_mat_ind = (__pyx_v_k - 1);
+42:         if 1 <= k <= x_max -2 :
                            __pyx_t_4 = (1 <= __pyx_v_k);
                            if (__pyx_t_4) {
                              __pyx_t_4 = (__pyx_v_k <= (__pyx_v_x_max - 2));
                            }
                            if (__pyx_t_4) {
/* … */
                            }
 43:             # entry that goes from j = [2, N -1]  (in python: [1, N-2]) 
 44:             # we have to discard the last value
+45:             k_entry = k_entry -  inv_h_mat[index_i - 1, mat_ind] * (inv_delta_wave[k] + inv_delta_wave[k-1])
                              __pyx_t_5 = (__pyx_v_index_i - 1);
                              __pyx_t_6 = __pyx_v_mat_ind;
                              __pyx_t_7 = __pyx_v_k;
                              __pyx_t_8 = (__pyx_v_k - 1);
                              __pyx_v_k_entry = (__pyx_v_k_entry - ((*((__pyx_t_18partial_derivative_DTYPE_t const  *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_inv_h_mat.data + __pyx_t_5 * __pyx_v_inv_h_mat.strides[0]) ) + __pyx_t_6 * __pyx_v_inv_h_mat.strides[1]) ))) * ((*((__pyx_t_18partial_derivative_DTYPE_t const  *) ( /* dim=0 */ (__pyx_v_inv_delta_wave.data + __pyx_t_7 * __pyx_v_inv_delta_wave.strides[0]) ))) + (*((__pyx_t_18partial_derivative_DTYPE_t const  *) ( /* dim=0 */ (__pyx_v_inv_delta_wave.data + __pyx_t_8 * __pyx_v_inv_delta_wave.strides[0]) ))))));
+46:         if k <= x_max - 3 :
                            __pyx_t_4 = (__pyx_v_k <= (__pyx_v_x_max - 3));
                            if (__pyx_t_4) {
/* … */
                            }
 47:             # entry that goes from j = [1, N -2]  (in python: [0, N-3]) 
 48:             # we have to discard the last two values
+49:             k_entry = k_entry + inv_h_mat[index_i - 1, mat_ind+1] * inv_delta_wave[k]
                              __pyx_t_8 = (__pyx_v_index_i - 1);
                              __pyx_t_7 = (__pyx_v_mat_ind + 1);
                              __pyx_t_6 = __pyx_v_k;
                              __pyx_v_k_entry = (__pyx_v_k_entry + ((*((__pyx_t_18partial_derivative_DTYPE_t const  *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_inv_h_mat.data + __pyx_t_8 * __pyx_v_inv_h_mat.strides[0]) ) + __pyx_t_7 * __pyx_v_inv_h_mat.strides[1]) ))) * (*((__pyx_t_18partial_derivative_DTYPE_t const  *) ( /* dim=0 */ (__pyx_v_inv_delta_wave.data + __pyx_t_6 * __pyx_v_inv_delta_wave.strides[0]) )))));
+50:         if k >= 2:  # entry from the sum that starts at j == 3  (in python, j == 2)
                            __pyx_t_4 = (__pyx_v_k >= 2);
                            if (__pyx_t_4) {
/* … */
                            }
 51:             # we have to discard the first two values
+52:             k_entry = k_entry + inv_h_mat[index_i - 1, mat_ind-1] * inv_delta_wave[k-1]
                              __pyx_t_6 = (__pyx_v_index_i - 1);
                              __pyx_t_7 = (__pyx_v_mat_ind - 1);
                              __pyx_t_8 = (__pyx_v_k - 1);
                              __pyx_v_k_entry = (__pyx_v_k_entry + ((*((__pyx_t_18partial_derivative_DTYPE_t const  *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_inv_h_mat.data + __pyx_t_6 * __pyx_v_inv_h_mat.strides[0]) ) + __pyx_t_7 * __pyx_v_inv_h_mat.strides[1]) ))) * (*((__pyx_t_18partial_derivative_DTYPE_t const  *) ( /* dim=0 */ (__pyx_v_inv_delta_wave.data + __pyx_t_8 * __pyx_v_inv_delta_wave.strides[0]) )))));
+53:         output[k] = k_entry
                            __pyx_t_8 = __pyx_v_k;
                            *((__pyx_t_18partial_derivative_DTYPE_t *) ( /* dim=0 */ (__pyx_v_output.data + __pyx_t_8 * __pyx_v_output.strides[0]) )) = __pyx_v_k_entry;
                        }
                    }
                }
            }
        }
        #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
            #undef likely
            #undef unlikely
            #define likely(x)   __builtin_expect(!!(x), 1)
            #define unlikely(x) __builtin_expect(!!(x), 0)
        #endif
      }
 54: 
 55:
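
For sanity-checking the compiled extension, a sequential Python transcription of the loop in source
lines 31-53 can be handy. The function below is a sketch reconstructed from the annotated source
above (same argument order and index arithmetic); the array shapes and values in the usage snippet
are illustrative assumptions, not values from the original project.

# reference_partial_derivative.py -- hedged sequential sketch of the prange loop above
import numpy as np

def partial_derivative_reference(inv_h_mat, inv_delta_wave, index_to_calc, output):
    # Same index arithmetic as source lines 31-53, without prange/OpenMP.
    x_max = output.shape[0]
    row = index_to_calc - 1                     # the single row of inv_h_mat that is read
    for k in range(1, x_max - 1):
        k_entry = 0.0
        mat_ind = k - 1
        if 1 <= k <= x_max - 2:                 # j = [2, N-1] term; discard the last value
            k_entry -= inv_h_mat[row, mat_ind] * (inv_delta_wave[k] + inv_delta_wave[k - 1])
        if k <= x_max - 3:                      # j = [1, N-2] term; discard the last two values
            k_entry += inv_h_mat[row, mat_ind + 1] * inv_delta_wave[k]
        if k >= 2:                              # term from the sum starting at j == 3 (python j == 2)
            k_entry += inv_h_mat[row, mat_ind - 1] * inv_delta_wave[k - 1]
        output[k] = k_entry
    return output

# Usage sketch (sizes and values are made up for illustration):
n = 8
x = np.linspace(0.0, 1.0, n)
inv_delta_wave = 1.0 / np.diff(x)               # precomputed 1/(x[i+1] - x[i]), length n-1
inv_h_mat = np.random.default_rng(0).random((n - 2, n - 2))
out_ref = np.zeros(n)
partial_derivative_reference(inv_h_mat, inv_delta_wave, 1, out_ref)

# With the compiled module built, the two should agree (n_threads picked arbitrarily):
# from partial_derivative import partial_derivative
# out_cy = np.zeros(n)
# partial_derivative(inv_h_mat, inv_delta_wave, 1, out_cy, 4)
# np.testing.assert_allclose(out_cy, out_ref)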