c++ – AI for OpenTTD – A* Path Finder: Follow-up and unique_ptr slow performance

Original review here: AI for OpenTTD – A* Path Finder

If you’d like to run the code yourself: https://github.com/marlonsmith10/empire_ai (use_smart_pointers branch)

Changes have been made based on the previous review. In particular, using std::priority_queue and std::unordered_map has dramatically improved performance. However, using std::unique_ptr compared to raw pointers is slowing the code down by about a factor of 2. Since I’m new to unique_ptr, I would appreciate any comments on whether it’s being used appropriately, and whether performance can be improved. Something that stands out to me is that when using std::unique_ptr, any node in m_closed_nodes must be moved out, checked, and then moved back in. With a raw pointer, this isn’t necessary unless the node is going to be re-opened.

The code also now checks to make sure that roads can be actually built based on the slope of the current tile, so there are no longer any broken connections in roads built along the discovered path.

As in the previous review, any comments on good C++ programming practices are welcome as well.

path.hh

#ifndef PATH_HH
#define PATH_HH


#include "stdafx.h"
#include "command_func.h"
#include <queue>
#include <unordered_map>

namespace EmpireAI
{
    // A* path finder over OpenTTD tiles. Call find() repeatedly until it
    // returns FOUND or UNREACHABLE, then walk the result with begin()/end()
    // (iteration runs from the end tile back towards the start tile).
    class Path
    {
    public:

        // Overall state of the search.
        enum Status
        {
            IN_PROGRESS,
            FOUND,
            UNREACHABLE
        };

        Path(const TileIndex start, const TileIndex end);

        // Find a partial path from start to end, returning the current status.
        // Examines at most max_node_count nodes per call so the search can be
        // spread across multiple invocations.
        Status find(const uint16_t max_node_count = DEFAULT_NODE_COUNT_PER_FIND);

    private:

        // One tile in the search with the usual A* bookkeeping:
        // g = cost from the start, h = heuristic cost to the end, f = g + h.
        struct Node
        {
            Node(TileIndex in_tile_index, int32 in_h)
            : tile_index(in_tile_index), h(in_h)
            {
            }

            Node()
            : tile_index(0), h(0)
            {}

            // Update the Node's g and h values, as well as its previous node. Returns true if the
            // new values are lower than the previous ones.
            bool update_costs(Node* const adjacent_node);

            const TileIndex tile_index;
            Node* previous_node = nullptr;
            int32 g = 0;
            const int32 h;
            int32 f = -1;  // -1 marks a node that has never been costed
        };

        // Orders the open queue so the node with the lowest f is on top.
        struct NodeCostCompare
        {
            bool operator()(const std::unique_ptr<Node>& node1, const std::unique_ptr<Node>& node2) const
            {
                return node1->f > node2->f;
            }
        };


        // Examine the tile offset from current_node by (x, y) and open or close it.
        void parse_adjacent_tile(Node* const current_node, const int8 x, const int8 y);

        // Return the corresponding node or create a new one if none is found
        std::unique_ptr<Node> get_node(const TileIndex tile_index);

        // Get the cheapest open node, returns nullptr if there are no open nodes
        std::unique_ptr<Node> cheapest_open_node();

        // Returns true if a road can be built from one node to the next
        bool nodes_can_connect_road(const Node* const node_from, const Node* const node_to) const;

        // Check this many nodes per call of find()
        static const uint16 DEFAULT_NODE_COUNT_PER_FIND = 20;

        void open_node(std::unique_ptr<Node> node);
        void close_node(std::unique_ptr<Node> node);

        Status m_status;

        // Non-owning observers into nodes owned by the containers below.
        // Initialized to nullptr so begin()/end() are safe to call before a
        // path has been found (previously these were left uninitialized).
        Node* m_start_node = nullptr;
        Node* m_end_node = nullptr;
        const TileIndex m_end_tile_index;

        // Containers for open and closed nodes; both own their nodes.
        std::unordered_map<TileIndex, std::unique_ptr<Node>> m_closed_nodes;
        // BUG FIX: std::priority_queue requires its element type to be the
        // underlying container's value_type; the previous declaration mixed
        // Node* with std::vector<std::unique_ptr<Node>>, which is ill-formed.
        std::priority_queue<std::unique_ptr<Node>, std::vector<std::unique_ptr<Node>>, NodeCostCompare> m_open_nodes;

    public:

        // Walks a found path backwards from the end node towards the start
        // node by following each node's previous_node pointer.
        class Iterator
        {
        public:

            Iterator(const Path::Node* node)
            : m_iterator_node(node)
            {}

            bool operator==(const Iterator& iterator) const
            {
                return m_iterator_node == iterator.m_iterator_node;
            }

            const Iterator& operator=(const Path::Node* node)
            {
                m_iterator_node = node;
                return *this;
            }

            bool operator!=(const Iterator& iterator) const
            {
                return m_iterator_node != iterator.m_iterator_node;
            }

            const Iterator& operator++()
            {
                m_iterator_node = m_iterator_node->previous_node;
                return *this;
            }

            Iterator operator++(int)
            {
                Iterator iterator = *this;
                m_iterator_node = m_iterator_node->previous_node;
                return iterator;
            }

            // Returns the current tile, or 0 when past the end of the path.
            TileIndex operator*() const
            {
                if(m_iterator_node == nullptr)
                {
                    return 0;
                }

                return m_iterator_node->tile_index;
            }

        private:
            const Path::Node* m_iterator_node;
        };

        // Iteration starts at the END of the discovered path...
        Iterator begin()
        {
            return Iterator(m_end_node);
        }

        // ...and stops when the START node is reached (exclusive).
        Iterator end()
        {
            return Iterator(m_start_node);
        }
    };
}


#endif // PATH_HH

path.cc

#include "path.hh"

#include "script_map.hpp"
#include "script_road.hpp"
#include "script_tile.hpp"
#include "map_func.h"

#include <algorithm>

using namespace EmpireAI;


// Construct a path finder and seed the open list with the start tile.
Path::Path(const TileIndex start, const TileIndex end)
: m_end_tile_index(end)
{
    // The start node's cost-so-far is zero, so f is just the heuristic.
    std::unique_ptr<Node> initial_node = get_node(start);
    initial_node->f = initial_node->h;

    // Remember the raw pointer for the iterator; ownership moves to the queue.
    m_start_node = initial_node.get();
    open_node(std::move(initial_node));

    m_status = IN_PROGRESS;
}


// Advance the search by expanding up to max_node_count nodes, cheapest first.
// Returns the search status after this slice of work.
Path::Status Path::find(const uint16_t max_node_count)
{
    // Nothing to do once the search has concluded.
    if(m_status != IN_PROGRESS)
    {
        return m_status;
    }

    for(uint16 examined = 0; examined < max_node_count; ++examined)
    {
        std::unique_ptr<Node> node = cheapest_open_node();

        // An exhausted open list means no route exists.
        if(node == nullptr)
        {
            m_status = UNREACHABLE;
            break;
        }

        // Reaching the destination tile completes the search; keep a raw
        // pointer for the iterator before ownership moves to the closed map.
        if(node->tile_index == m_end_tile_index)
        {
            m_end_node = node.get();
            close_node(std::move(node));
            m_status = FOUND;
            break;
        }

        // Evaluate the four orthogonally adjacent tiles, in the same order
        // as before: east, west, north, south offsets.
        static const int8 offsets[4][2] = { {1, 0}, {-1, 0}, {0, 1}, {0, -1} };
        for(const auto& offset : offsets)
        {
            parse_adjacent_tile(node.get(), offset[0], offset[1]);
        }

        // This node has now been fully expanded.
        close_node(std::move(node));
    }

    return m_status;
}


// Cost the tile offset from current_node by (x, y) and either re-open it
// (when reachable and cheaper via this route) or put it on the closed list.
void Path::parse_adjacent_tile(Node* const current_node, const int8 x, const int8 y)
{
    const TileIndex neighbour_index = current_node->tile_index + ScriptMap::GetTileIndex(x, y);

    std::unique_ptr<Node> neighbour = get_node(neighbour_index);

    // A neighbour is opened only when a road can reach it from the current
    // node AND the route through the current node improves its cost; the
    // short-circuit && reproduces the original nested checks exactly.
    const bool should_open = nodes_can_connect_road(current_node, neighbour.get())
            && neighbour->update_costs(current_node);

    if(should_open)
    {
        open_node(std::move(neighbour));
    }
    else
    {
        close_node(std::move(neighbour));
    }
}


bool Path::nodes_can_connect_road(const Node* const node_from, const Node* const node_to) const
{
    // The start node doesn't connect to a previous node, so we can't check it for the correct slope.
    // The pathfinder can only ensure that the next node in the path can connect to the start node.
    if(node_from->previous_node == nullptr)
    {
        return true;
    }

    int32 supports_road = ScriptRoad::CanBuildConnectedRoadPartsHere(node_from->tile_index, node_from->previous_node->tile_index, node_to->tile_index);

    if(supports_road <= 0)
    {
        return false;
    }

    if(!ScriptTile::IsBuildable(node_to->tile_index) && !ScriptRoad::IsRoadTile(node_to->tile_index))
    {
        return false;
    }

    return true;
}


// Pop and return the cheapest open node, skipping stale duplicates that were
// closed after being queued. Returns nullptr when the open queue is empty.
std::unique_ptr<Path::Node> Path::cheapest_open_node()
{
    // While there are open nodes available
    while(!m_open_nodes.empty())
    {
        // Remove the cheapest node from the open nodes list.
        // NOTE(review): top() returns a const reference, so the const_cast is
        // required to move the unique_ptr out before pop(). The element itself
        // was not declared const, so the cast is legal, but the moved-from
        // (null) pointer sits in the heap until pop() removes it — keep these
        // two statements adjacent.
        std::unique_ptr<Node> current_node = std::move(const_cast<std::unique_ptr<Node>&>(m_open_nodes.top()));
        m_open_nodes.pop();

        // If this node has already been closed, discard it and skip to the next one. Duplicates are expected
        // here because get_node() doesn't check for duplicates for performance reasons.
        if(m_closed_nodes.find(current_node->tile_index) != m_closed_nodes.end())
        {
            continue;
        }

        return current_node;
    }

    // There are no more open nodes
    return nullptr;
}


// Return the node for tile_index, moving it out of the closed list if it is
// there, or creating a fresh node (with its Manhattan-distance heuristic)
// otherwise.
std::unique_ptr<Path::Node> Path::get_node(const TileIndex tile_index)
{
    // If the node is not closed, create a new one.
    // Duplicate open nodes are considered an acceptable tradeoff since it's not easy to search
    // std::priority_queue for an already existing open node.
    const auto closed_iter = m_closed_nodes.find(tile_index);
    if(closed_iter == m_closed_nodes.end())
    {
        return std::unique_ptr<Node>(new Node(tile_index, ScriptMap::DistanceManhattan(tile_index, m_end_tile_index)));
    }

    // Move the node out and erase via the iterator. Reusing the find() result
    // avoids the two extra hash lookups the previous at() + erase(key) pair
    // performed on this hot path.
    std::unique_ptr<Node> node = std::move(closed_iter->second);
    m_closed_nodes.erase(closed_iter);

    return node;
}


// Take ownership of node and queue it as open.
void Path::open_node(std::unique_ptr<Node> node)
{
    // Duplicates are deliberately tolerated here: std::priority_queue cannot
    // be searched efficiently, and occasionally re-processing a tile is
    // cheaper than checking for an already-open entry on every push.
    m_open_nodes.emplace(std::move(node));
}


void Path::close_node(std::unique_ptr<Node> node)
{
    m_closed_nodes(node->tile_index) = std::move(node);
}


// Re-cost this node assuming it is entered from adjacent_node (each step
// costs 1). Returns true — and records the new costs and predecessor — when
// this node has never been costed (f == -1) or the new route is cheaper.
bool Path::Node::update_costs(Node* const adjacent_node)
{
    const int32 candidate_g = adjacent_node->g + 1;
    const int32 candidate_f = candidate_g + h;

    // Keep the existing route when one exists and it is at least as cheap.
    const bool never_costed = (f == -1);
    if(!never_costed && candidate_f >= f)
    {
        return false;
    }

    g = candidate_g;
    f = candidate_f;
    previous_node = adjacent_node;

    return true;
}

slowdown – How to use decimation to slow down an audio signal without changing pitch

I have been told that decimation slows down an audio signal without changing the pitch. To slow it down by 100%, you should map each sample to twice as many samples, and then output that mapped-onto signal at the original sampling rate. But I don’t know what “map onto” means, or why this works.

I’m first hoping for a mostly verbal explanation. After that, I would like an algorithmic explanation of how decimation works for audio signal processing.

Slow Parallel SQL Server query, almost instant in serial

I have a SQL Server query as follows (obfuscated):

-- Set COLUMN1 to 'N' for rows matching either branch of the WHERE clause.
UPDATE  (TABLE1)
SET     (COLUMN1) = CAST('N' AS CHAR(1))
FROM    (TABLE1)
-- Branch 1: COLUMN1 is '2' and COLUMN2 is in a fixed value list.
WHERE   (COLUMN1 = '2' AND COLUMN2 IN('VAL1', 'VAL2', 'VAL3')) OR 
        -- Branch 2: not already 'N', and the correlated subquery — which
        -- counts TABLE2/TABLE3 matches restricted to each row's earliest
        -- COLUMN7 entry — finds at least one qualifying row.
        (COLUMN1 <> 'N' AND (
                                    SELECT  COUNT(*)
                                    FROM    TABLE2 wle
                                            JOIN TABLE3 wl 
                                                    ON wl.COLUMN3 = wle.COLUMN3
                                    WHERE   TABLE1.COLUMN4 = wle.COLUMN4 AND 
                                            (wl.COLUMN5 = '1' OR wl.COLUMN6 = '1') AND 
                                            -- only the earliest COLUMN7 per COLUMN4 group counts
                                            wle.COLUMN7 = (
                                                            SELECT  MIN(alias.COLUMN7)
                                                            FROM    TABLE2 AS alias
                                                            WHERE   TABLE1.COLUMN4 = alias.COLUMN4
                                                            )
                                ) > 0
                                            
        )

We have just upgraded our (test) server to SQL Server 2016 SP2 from SQL Server 2014 SP3.

The performance of the query above appears to have fallen off a cliff as a result of this.

When I run sp_whoisactive, I can see the wait_info is (48847425ms)CXCONSUMER suggesting that the query has been waiting for CXCONSUMER for the last 48847425ms (814 minutes) The query has currently been running for 13:34:07.587.

The wait info suggests that the query has been waiting on CXCONSUMER for the majority, if not all of its execution time.

This to me suggests some issue with parallelism, so I ran the query with the hint OPTION (MAXDOP 1) and it finished in an acceptable time (around 30 seconds).

The plan shape is as follows.

enter image description here

The plan shape for the MAXDOP 1 query is as follows:

enter image description here

(the same plan with no parallelism operators)

When I run the parallel query with the live execution plan enabled, the operator highlighted in green (Clustered Index scan on TABLE2) shows 100% but it’s execution time continues clocking up.

The operator highlighted in red (Clustered Index Scan on TABLE1) gets “stuck” on 4 rows of 180,215.

What could be causing this problem? In my head I am thinking it is parallelism skew (uneven workload), but given that the serial query finishes in less than a minute, I would have thought that even if the query went parallel but only used one thread, it would still complete in a time close to the serial query.
Also, given that the live plan appears to show the red clustered index scan not progressing at all I am unsure what is happening.

I have found this article that describes similar behavior though the purpose of the article appears to be showing that CXCONSUMER is not necessarily a benign wait and doesn’t say how / if it can be fixed.

In terms of fixing it, I know the code can be re-written in a more efficient way (both the COUNT and MIN subqueries could be selected into variables) but unfortunately changing the query isn’t an option I have.
I could force a MAXDOP hint but again, this means changing the code. Perhaps I could force it with a plan guide, though such a practice of forcing the optimizer usually isn’t advisable?

What causes this problem? Why is the query running slow in SQL Server 2016? Is there a way to fix this without changing the code?

wireless networking – Slow wifi using extender on only one device

Your privacy

By clicking “Accept all cookies”, you agree Stack Exchange can store cookies on your device and disclose information in accordance with our Cookie Policy.




display – MacBook Pro becomes terribly Slow When Using The Secondary GPU

Whenever mac starts to use the secondary GPU (AMD Radeon), such as graphical tasks or when I use a secondary display, It becomes terribly slow.

I can say that it must be a GPU problem because if I disable GPU switching from Battery preferences, it starts to become slower.

This problem occurred after the day I run a high-graphics game (CS:GO). Can this damage the GPU?

Are there any solutions that I can try?

A slow query on MS-SQL-Server 2019 cannot be optimized

In the following thread I received some important and valuable information on how I can improve a slow query on MS-SQL-Server 2019:

Helpful Link

However, that didn’t do very much.

First I deleted all non-clustered indexes and replaced them with clustered indexes. With the clustered index, some queries seem to be at least a little faster.

First question: It always seemed to me as if MS-SQL had not used the non-clustered index at all because creating a non-clustered index did not improve performance. Is that possible?

I kept getting the tip from you that I should absolutely avoid data conversions when accessing the index.

I tried that in a view. In the following script, the date values ​​are ALL nvarchars.
However, the script runs for 78 minutes! Without the SUBSTRINGs it doesn’t run any faster.

First Execution Plan

Second question: Is there a reason why this script has been running for so long?

The following query is the old version of the same script, just with different tables, but the table structure and the amount of data is roughly the same:

Second Execution Plan

The script from the Second Execution Plan runs for 7 minutes, although here “DATETIME date values” ​​are linked with “NVARCHAR date values”.
I don’t understand why this script runs so fast, although it is obvious that type conversions must take place here when the index is accessed.

The latter query is then integrated as a view in the following script:

Third Execution Plan

This insert runs for 29 minutes. Previously, on MS-SQL 2008, the same insert ran for 10 minutes.

In this script I also removed the data conversions when accessing the index, but that did not bring any performance gain at all.

This script always runs for about 30 minutes, no matter what I do, I can’t get it any faster.

Third question: Do you see a bottleneck in the third execution plan?

After all, you can see in all three execution plans that clustered index accesses take place.

Why is this Haskell program so slow

The following is a Haskell program that uses Monte Carlo to calculate pi. However, when the input is 10 million, the program ran for 20 seconds. The C program written in the same logic took only 0.206 seconds. Why is this, and how can I speed it up? Thank you all.

This is Haskell version.

import System.Random
import Draw
import Data.List
import System.Environment

-- | Monte Carlo estimate of pi: draw n points uniformly in [-1,1] x [-1,1]
-- (fixed seeds, so the result is deterministic for a given n) and count the
-- fraction whose squared radius lies inside the unit circle.
-- NOTE: the extraction had stripped the lambda backslashes and the list
-- brackets in the type annotations; they are restored here.
montecarloCircle :: Int -> Double
montecarloCircle n = 4 * fromIntegral hits / fromIntegral n
  where
    xs = take n (randomRs (-1.0, 1) (mkStdGen 1)) :: [Double]
    ys = take n (randomRs (-1.0, 1) (mkStdGen 2)) :: [Double]
    radii2 = zipWith (\x y -> x ** 2 + y ** 2) xs ys
    hits = foldl' (\acc r2 -> if r2 <= 1 then acc + 1 else acc) (0 :: Int) radii2

main = do
    -- The sample count is read from the first command-line argument.
    num <- getArgs
    let n = read (num !! 0)
    print $ montecarloCircle n

This is C version

#include <stdio.h>
#include <math.h>
#include <time.h>
#include <stdlib.h>

#define N       10000000

#define int_t   long // the type of N and M

// Uniform pseudo-random double in [0.0, 1.0], derived from rand().
double rand01()
{
    return (double)rand() / RAND_MAX;
}

int main()
{
        // Seed with the wall clock so each run draws a fresh sample.
        srand((unsigned)time(NULL));

        double x,y;
        int_t M = 0;  // count of samples landing inside the unit circle

        for (int_t i = 0;i < N;i++)
        {
                x = rand01();
                y = rand01();
                if (x*x+y*y<1) M++;
        }

        // Quarter-circle area over unit-square area is pi/4, hence the 4*.
        double pi = (double)4*M/N;
        // BUG FIX: the format string had lost the backslash ("%lfn"), so the
        // output ran without a newline; restore "\n".
        printf("%lf\n", pi);
}

Performance issue in python- network creation based on the Euclidean distance with nested for loops is too slow

I want to create a network in which the links are formed based on a similarity metric defined as the Euclidean distance between the nodes. The distance is calculated using socio-demographic features of customers such as gender and age. The problem is the code takes 200 seconds to just create the network and as I am tuning my model and the code executes at least 100 times, the long execution time of this piece is making the whole code run slowly.

So, the nodes are in fact customers. I defined a class for them. They have two attributes gender (numerical; specified by number 0 or 1) and age (varies from 24 to 44) which are stored in a csv file. I have generated a sample csv file here :

# Generate a synthetic customer sample and persist it for the network builder.
# NOTE: the extraction had turned the list-comprehension brackets into
# parentheses (generator expressions); lists are restored here.
#number of customers
ncons = 5000
gender = [random.randint(0, 1) for i in range(ncons)]
age = [random.randint(22, 45) for i in range(ncons)]
customer_df = pd.DataFrame(
    {'customer_gender': gender,
     'customer_age': age
    })
customer_df.to_csv('customer_df.csv', mode = 'w', index=False)

The Euclidean distance delta_ik has the following form (see the attached image). In the formula, n is the number of attributes (here n=2, age and gender). For customers i and k, S_f,i - S_f,k is the difference between attribute f = 1,2, which is divided by the maximum range of attribute f over all the customers (max d_f). So the distance is the distance in the values of socio-demographic attributes, not geographical positions.

Then I define the similarity metric H_ik which creates a number between 0 and 1 from delta_ik as follow:customer similarity. Finally, For customers i and k, I generate a random number rho between 0 and 1. If rho is smaller than H_ik, the nodes are connected.

So, the code that keeps delta_ik in a matrix and then uses that to generate the network looks as below:

import random
import pandas as pd
import time
import csv
import networkx as nx
import numpy as np
import math
#Read the csv file containing the part worth utilities of 184 consumers
def readCSVPWU():
    """Load 'customer_df.csv' into the module-level attribute matrix.

    Sets the globals ``headers`` (first CSV row, with numeric cells converted
    to float) and ``Attr`` (remaining rows as lists of floats), and returns
    ``Attr``. The original used parentheses where the extraction garbled the
    list brackets; ``[...]`` indexing and literals are restored here.
    """
    global headers
    global Attr
    Attr = []
    with open('customer_df.csv') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=',')
        headers = next(csvreader)  # skip the first row of the CSV file.
        # CSV header cells are strings and should be turned into float numbers.
        for i in range(len(headers)):
            if headers[i].isnumeric():
                headers[i] = float(headers[i])
        for row in csvreader:
            Attr.append(row)
    # convert strings to float numbers
    Attr = [[float(j) for j in i] for i in Attr]
    # Return the CSV as a matrix (one row per consumer)
    return Attr

#customer class
class Customer:
    """A consumer agent holding a randomly drawn socio-demographic profile."""

    def __init__(self, PWU = None, Ut = None):
        # Ut: utility placeholder supplied by the caller.
        self.Ut = Ut
        # Pick a random row from the module-level survey utility data Attr
        # (must be populated by readCSVPWU() first). Note the passed-in PWU
        # argument is deliberately ignored, matching the original behavior.
        self.PWU = Attr[random.randint(0, len(Attr) - 1)]  # Pick random row from survey utility data


#Generate a network by connecting nodes based on their similarity metric
def Network_generation(cust_agent):
    """Build an undirected social network over ``ncons`` consumer agents.

    For agents i and k, delta_ik is the Euclidean distance over the last two
    socio-demographic attributes (each difference normalised by that
    attribute's max range), H_ik = 1 - delta_ik / max(delta), and an edge is
    added when a uniform random draw rho < H_ik. Relies on module globals
    ``ncons`` and ``Attr`` (loaded by readCSVPWU()). Returns the graph.
    The garbled parentheses from the extraction are restored to brackets.
    """
    start_time = time.time()  # track execution time

    # We form links between consumer agents based on their degree of
    # socio-demographic similarity.
    global ncons
    Gcons = nx.Graph()
    # add nodes
    [Gcons.add_node(i, data = cust_agent[i]) for i in range(ncons)]

    # ---------- Compute the node-to-node distance ----------
    # Initialize Deltaik with zeros
    Deltaik = [[0 for xi in range(ncons)] for yi in range(ncons)]
    # For each attribute, find the maximum range of that attribute;
    # for instance max age diff = max age - min age
    maxdiff = []
    allval = []
    # The last two columns of Attr keep income and age data.
    np_Attr = np.array(Attr)
    # Take the last two columns: income and age of the participants.
    socio = np_Attr[:, [len(Attr[0]) - 2, len(Attr[0]) - 1]]
    # convert numpy array to a list of lists
    socio = socio.tolist()
    # Max range for each attribute
    for f in range(len(socio[0])):
        for node1 in Gcons.nodes():
            # keep all values of an attribute to find the max range
            allval.append(Gcons.nodes[node1]['data'].PWU[-2:][f])
        maxdiff.append(max(allval) - min(allval))
        allval = []

    # THE SECOND MOST TIME CONSUMING PART ********************
    for node1 in Gcons.nodes():
        for node2 in Gcons.nodes():
            tempdelta = 0
            # for each feature (attribute)
            for f in range(len(socio[0])):
                Deltaik[node1][node2] = (Gcons.nodes[node1]['data'].PWU[-2:][f]
                                         - Gcons.nodes[node2]['data'].PWU[-2:][f])
                # normalise by the attribute's max range, then square
                insidepar = (Deltaik[node1][node2] / maxdiff[f]) ** 2
                tempdelta += insidepar
            Deltaik[node1][node2] = math.sqrt(tempdelta)
    # THE END OF THE SECOND MOST TIME CONSUMING PART *********

    # Find maximum of the distance matrix
    maxdel = max(map(max, Deltaik))
    # Homophilic weight: similarity in [0, 1]
    import copy
    Hik = copy.deepcopy(Deltaik)
    for i in range(len(Deltaik)):
        for j in range(len(Deltaik[0])):
            Hik[i][j] = 1 - (Deltaik[i][j] / maxdel)
    # Dataframe to save Hik
    dfHik = pd.DataFrame(columns = list(range(ncons)), index = list(range(ncons)))
    temp_h = []
    # For every consumer pair i and k, a random number rho ~ U(0,1) is drawn
    # and the two consumers are connected if rho < H_ik.
    # THE MOST TIME CONSUMING PART ********************
    for node1 in Gcons.nodes():
        for node2 in Gcons.nodes():
            # Add Hik to the dataframe
            temp_h.append(Hik[node1][node2])
            rho = np.random.uniform(0, 1, 1)
            if node1 != node2:
                if rho < Hik[node1][node2]:
                    Gcons.add_edge(node1, node2)
        # Row node1 keeps homophily with every other consumer
        dfHik.loc[node1] = temp_h
        temp_h = []
    # nx.draw(Gcons, with_labels=True)
    print("Simulation time: %.3f seconds" % (time.time() - start_time))
    # THE END OF THE MOST TIME CONSUMING PART ********************

    return Gcons
#%%
# Driver: build the customer sample, load it, and generate the network.
# (List brackets restored where the extraction had turned them into parens.)
#number of customers
ncons = 5000
gender = [random.randint(0, 1) for i in range(ncons)]
age = [random.randint(22, 39) for i in range(ncons)]
customer_df = pd.DataFrame(
    {'customer_gender': gender,
     'customer_age': age
    })
customer_df.to_csv('customer_df.csv', mode = 'w', index=False)
readCSVPWU()
# Each agent starts with empty PWU/Ut; Customer.__init__ samples PWU from Attr.
customer_agent = dict(enumerate([Customer(PWU = [], Ut = []) for ij in range(ncons)]))
G = Network_generation(customer_agent)

I realized that there are two nested loops that are more time consuming than others, but I am not sure how to write them more efficiently. I would tremendously appreciate if you could please give me some advice on the ways to decrease the elapsed time.

Thank you so much

Slow query SQL Server 2016

I have a query that is taking more than 7 hours and doesn’t finish, as it doesn’t finish I can’t generate the actual execution plan.

Here’s brentozar’s Paste The Plan

It has a temporary table, and then a SELECT that inserts into the Temporary table.

-- Staging table for per-process CNJ indicator rows; SEQ_RESULT is a
-- surrogate identity key and the EVENTO_* columns carry event references.
 CREATE TABLE #Result (
     SEQ_RESULT              INT IDENTITY(1,1),
     NUM_PROCES              CHAR (20)           NOT NULL,
     DES_SISTEM_PROCES       VARCHAR (10)        NOT NULL,
     IDE_GRAU                CHAR (1)            NULL,
     COD_CLASSE_CNJ          INT                 NULL,
     COD_ORGAO_JULGAD_CNJ    INT                 NULL,
     COD_ASSUNT_CNJ          INT                 NULL,
     NUM_CPF_MAGIST          CHAR (11)           NULL,
     IDE_TEMPO               INT                 NULL,
     COD_MATERI              INT                 NULL,
     COD_EVENTO_CNJ          INT                 NULL,
     COD_ORGAO_GABINE_CNJ    INT                 NULL,
     SEQ_DESCRI_GABINE       INT                 NULL,
     SEQ_PROCES_FASE         INT                 NULL,
     DAT_HORA_FASE           DATETIME            NULL,
     DAT_HORA_FASE_ORGAO_PRINCI          DATETIME            NULL,
     COD_EVENTO_COMPLE_CNJ   INT                 NULL,
     COD_RESULT_CNJ          INT                 NULL,
     -- auxiliary dates used by the populating SELECT
     DAT_AUX_1               DATETIME            NULL,
     DAT_AUX_2               DATETIME            NULL,
     SEQ_PROCES              INT                 NULL, 
     SEQ_PROCES_INDICA       INT                 NULL,
     SEQ_FASE                BIGINT              NULL,
    EVENTO_0001 BIGINT NULL,EVENTO_0002 BIGINT NULL,
    EVENTO_0001_LOCAL BIGINT NULL,EVENTO_0002_LOCAL BIGINT NULL,
     EVENTO_PRINCIPAL        INT NULL,
     EVENTO_ORGAO_PRINCIPAL  INT NULL,
     EVENTO_FASE_AUX1        INT NULL,
     EVENTO_FASE_AUX2        INT NULL
);





INSERT INTO #result
        (
                    num_proces,
                    des_sistem_proces,
                    ide_grau,
                    cod_classe_cnj,
                    cod_orgao_julgad_cnj,
                    cod_assunt_cnj,
                    num_cpf_magist,
                    ide_tempo,
                    cod_materi,
                    cod_evento_cnj,
                    cod_orgao_gabine_cnj,
                    seq_descri_gabine,
                    seq_proces_fase,
                    dat_hora_fase,
                    dat_hora_fase_orgao_princi,
                    cod_evento_comple_cnj,
                    cod_result_cnj,
                    dat_aux_1,
                    dat_aux_2,
                    evento_0001,
                    evento_0002,
                    evento_principal,
                    evento_orgao_principal,
                    evento_fase_aux1,
                    evento_fase_aux2
        )
SELECT num_proces,
   des_sistem_proces,
   ide_grau,
   cod_classe_cnj,
   cod_orgao_julgad_cnj,
   cod_assunto,
   num_cpf_magist,
   ide_tempo,
   cod_materi,
   cod_evento_cnj,
   cod_orgao_gabine_cnj,
   cod_descri_gabine,
   seq_proces_fase,
   dat_hora_fase,
   dat_hora_fase_orgao_principal,
   cod_evento_comple_cnj,
   cod_result_cnj,
   dat_aux_1,
   dat_aux_2,
   evento_0001,
   evento_0002,
   tabela_principal,
   tabela_orgao_principal,
   tabela_auxiliar_1,
   tabela_auxiliar_2
FROM   (
                            
    SELECT
        NUM_PROCES,
        (   SELECT
            TOP 1 DES_SISTEM_PROCES   
        FROM
            T_FASES_CALCULO  
        WHERE
            IDE_GRAU = BASE.IDE_GRAU 
            AND NUM_PROCES = BASE.NUM_PROCES 
            AND DAT_HORA_FASE <= '2021-01-31 23:59:59.997'  
        ORDER BY
            SEQ  ) AS 'DES_SISTEM_PROCES',
        IDE_GRAU,
        SEQ_PROCES,
        SEQ_PROCES_GRAU,
        COD_CLASSE_CNJ,
        COD_ORGAO_JULGAD_CNJ,
        (SELECT
            TOP 1 HA.COD_ASSUNT_CNJ 
        FROM
            HIST_PROCESSO_ASSUNTO HA 
        WHERE
            HA.IDE_ASSUNT_PRINCI = 'S' 
            AND HA.NUM_PROCES = BASE.NUM_PROCES 
            AND HA.DES_SISTEM_PROCES = BASE.DES_SISTEM_PROCES 
            AND HA.TIP_OPERA = 'E' 
            AND HA.DAT_ASSUNT_MOVIME <= BASE.DAT_HORA_FASE 
        ORDER BY
            HA.DAT_ASSUNT_MOVIME DESC) AS COD_ASSUNTO,
        NUM_CPF_MAGIST,
        1225 AS IDE_TEMPO,
        COD_MATERI,
        COD_EVENTO_CNJ,
        COD_ORGAO_GABINE_CNJ,
        (SELECT
            TOP 1 SEQ_DESCRI_GABINE 
        FROM
            D_DESCRICAO_GABINETE GAB 
        WHERE
            GAB.COD_GABINE_CNJ = BASE.COD_ORGAO_GABINE_CNJ 
            AND (
                (
                    BASE.DAT_HORA_FASE >= '2021-01-01 00:00:00.000' 
                    AND GAB.DAT_INICIO <= BASE.DAT_HORA_FASE
                ) 
                OR (
                    BASE.DAT_HORA_FASE < '2021-01-01 00:00:00.000' 
                    AND GAB.DAT_INICIO <= '2021-01-31 23:59:59.997'
                )
            ) 
        ORDER BY
            DAT_INICIO DESC) AS COD_DESCRI_GABINE,
        SEQ_PROCES_FASE,
        DAT_HORA_FASE,
        DAT_HORA_FASE_ORGAO_PRINCIPAL,
        COD_EVENTO_COMPLE_CNJ,
        COD_RESULT_CNJ,
        DAT_AUX_1,
        DAT_AUX_2,
        EVENTO_0001,
        EVENTO_0002,
        TABELA_PRINCIPAL,
        TABELA_ORGAO_PRINCIPAL,
        TABELA_AUXILIAR_1,
        TABELA_AUXILIAR_2 
    FROM
        ( SELECT
            DISTINCT AUTO_INDICADOR_0647_EVENTO_0001.NUM_PROCES,
            AUTO_INDICADOR_0647_EVENTO_0001.DES_SISTEM_PROCES,
            AUTO_INDICADOR_0647_EVENTO_0001.IDE_GRAU,
            AUTO_INDICADOR_0647_EVENTO_0001.SEQ_PROCES,
            AUTO_INDICADOR_0647_EVENTO_0001.SEQ_PROCES_GRAU,
            AUTO_INDICADOR_0647_EVENTO_0001.DAT_HORA_FASE as 'DAT_HORA_FASE',
            AUTO_INDICADOR_0647_EVENTO_0001.DAT_HORA_FASE AS 'DAT_HORA_FASE_ORGAO_PRINCIPAL',
            AUTO_INDICADOR_0647_EVENTO_0001.COD_EVENTO_CNJ,
            AUTO_INDICADOR_0647_EVENTO_0001.COD_ORGAO_JULGAD_CNJ,
            AUTO_INDICADOR_0647_EVENTO_0001.COD_ORGAO_GABINE_CNJ,
            AUTO_INDICADOR_0647_EVENTO_0001.NUM_CPF_MAGIST,
            AUTO_INDICADOR_0647_EVENTO_0001.COD_EVENTO_COMPLE_CNJ,
            AUTO_INDICADOR_0647_EVENTO_0001.COD_RESULT_CNJ,
            AUTO_INDICADOR_0647_EVENTO_0001.SEQ_PROCES_FASE,
            AUTO_INDICADOR_0647_EVENTO_0001.COD_MATERI,
            AUTO_INDICADOR_0647_EVENTO_0001.COD_CLASSE_CNJ,
            AUTO_INDICADOR_0647_EVENTO_0001.SEQ,
            AUTO_INDICADOR_0647_EVENTO_0001.DAT_CARGA,
            NULL AS 'DAT_AUX_1',
            NULL AS 'DAT_AUX_2',
            AUTO_INDICADOR_0647_EVENTO_0001.SEQ_PROCES_FASE AS 'EVENTO_0001',
            AUTO_INDICADOR_0647_EVENTO_0002.SEQ_PROCES_FASE AS 'EVENTO_0002',
            1 as 'TABELA_PRINCIPAL',
            1 as 'TABELA_ORGAO_PRINCIPAL',
            NULL as 'TABELA_AUXILIAR_1',
            NULL as 'TABELA_AUXILIAR_2',
            ROW_NUMBER() OVER (PARTITION 
        BY
            AUTO_INDICADOR_0647_EVENTO_0001.SEQ_PROCES_GRAU 
        ORDER BY
            AUTO_INDICADOR_0647_EVENTO_0001.SEQ) AS RN 
        FROM
            (SELECT
                REL.* 
            FROM
                ( SELECT
                    * 
                FROM
                    ( SELECT
                        F.COD_CLASSE_CNJ,
                        F.COD_EVENTO_COMPLE_CNJ,
                        F.COD_EVENTO_CNJ,
                        F.COD_MATERI,
                        F.COD_ORGAO_GABINE_CNJ,
                        F.COD_ORGAO_JULGAD_CNJ,
                        F.SEQ_RESULT_COMPLE AS COD_RESULT_CNJ,
                        F.NUM_CPF_MAGIST,
                        REL.DAT_FASE AS DAT_HORA_FASE,
                        REL.SEQ_PROCES_FASE,
                        REL.NUM_REGIST_JUDICI AS NUM_PROCES,
                        REL.IDE_SISTEM AS DES_SISTEM_PROCES,
                        REL.DAT_AUXILI_1 AS DAT_AUX_1,
                        REL.DAT_AUXILI_2 AS DAT_AUX_2,
                        NUM.SEQ_PROCES,
                        NUM.SEQ_PROCES_GRAU,
                        T.SEQ,
                        T.IDE_GRAU,
                        T.DAT_CARGA 
                    FROM
                        F_JUSTICA_NUMERO F WITH (NOLOCK) 
                    JOIN
                        REL_FATO_PROCESSO REL WITH (NOLOCK) 
                            ON F.SEQ_JUSTIC_NUMERO = REL.SEQ_JUSTIC_NUMERO 
                    JOIN
                        SISTEMA_PROCESSUAL S WITH (NOLOCK) 
                            ON REL.IDE_SISTEM = S.DES_SISTEM_PROCES 
                    JOIN
                        NUMERO_PROCESSO NUM WITH (NOLOCK) 
                            ON NUM.NUM_PROCES = REL.NUM_REGIST_JUDICI 
                            AND NUM.SEQ_SISTEM = S.SEQ_SISTEM_PROCES 
                    OUTER APPLY ( SELECT
                        TOP 1 * FROM
                            T_FASES_CALCULO WITH (NOLOCK) 
                        WHERE
                            SEQ_PROCES_FASE = REL.SEQ_PROCES_FASE 
                            OR ( SEQ_PROCES_GRAU = NUM.SEQ_PROCES_GRAU 
                            AND DAT_HORA_FASE = REL.DAT_FASE ) 
                        ORDER BY
                            SEQ ) T 
                    WHERE
                        1=1 
                        AND SEQ_INDICA_COLUNA = 602 
                        AND IDE_TEMPO = 1225 
                    ) UNION_0001) REL ) AUTO_INDICADOR_0647_EVENTO_0001 
            OUTER APPLY (SELECT
                REL.* FROM
                    ( SELECT
                        * 
                    FROM
                        ( SELECT
                            F.COD_CLASSE_CNJ,
                            F.COD_EVENTO_COMPLE_CNJ,
                            F.COD_EVENTO_CNJ,
                            F.COD_MATERI,
                            F.COD_ORGAO_GABINE_CNJ,
                            F.COD_ORGAO_JULGAD_CNJ,
                            F.SEQ_RESULT_COMPLE AS COD_RESULT_CNJ,
                            F.NUM_CPF_MAGIST,
                            REL.DAT_FASE AS DAT_HORA_FASE,
                            REL.SEQ_PROCES_FASE,
                            REL.NUM_REGIST_JUDICI AS NUM_PROCES,
                            REL.IDE_SISTEM AS DES_SISTEM_PROCES,
                            REL.DAT_AUXILI_1 AS DAT_AUX_1,
                            REL.DAT_AUXILI_2 AS DAT_AUX_2,
                            NUM.SEQ_PROCES,
                            NUM.SEQ_PROCES_GRAU,
                            T.SEQ,
                            T.IDE_GRAU,
                            T.DAT_CARGA 
                        FROM
                            F_JUSTICA_NUMERO F WITH (NOLOCK) 
                        JOIN
                            REL_FATO_PROCESSO REL WITH (NOLOCK) 
                                ON F.SEQ_JUSTIC_NUMERO = REL.SEQ_JUSTIC_NUMERO 
                        JOIN
                            SISTEMA_PROCESSUAL S WITH (NOLOCK) 
                                ON REL.IDE_SISTEM = S.DES_SISTEM_PROCES 
                        JOIN
                            NUMERO_PROCESSO NUM WITH (NOLOCK) 
                                ON NUM.NUM_PROCES = REL.NUM_REGIST_JUDICI 
                                AND NUM.SEQ_SISTEM = S.SEQ_SISTEM_PROCES 
                                AND NUM.SEQ_PROCES_GRAU = AUTO_INDICADOR_0647_EVENTO_0001.SEQ_PROCES_GRAU 
                        OUTER APPLY ( SELECT
                            TOP 1 * FROM
                                T_FASES_CALCULO WITH (NOLOCK) 
                            WHERE
                                SEQ_PROCES_FASE = REL.SEQ_PROCES_FASE 
                                OR ( SEQ_PROCES_GRAU = NUM.SEQ_PROCES_GRAU 
                                AND DAT_HORA_FASE = REL.DAT_FASE ) 
                            ORDER BY
                                SEQ ) T 
                        WHERE
                            1=1 
                            AND SEQ_INDICA_COLUNA = 220 
                            AND IDE_TEMPO = 1225 ) UNION_0001) REL 
                    WHERE
                        AUTO_INDICADOR_0647_EVENTO_0001.SEQ IS NOT NULL) AUTO_INDICADOR_0647_EVENTO_0002 
                    WHERE
                        (
                            AUTO_INDICADOR_0647_EVENTO_0001.SEQ IS NOT NULL 
                        ) 
                        AND (
                            AUTO_INDICADOR_0647_EVENTO_0002.SEQ IS NULL 
                        ) ) BASE 
                WHERE
                    BASE.RN = 1
                        ) CONSULTA

Can someone help me?
Thank you very much.

Python 3.x – PyMongo update is slow

I am implementing a crawler, and the data that I crawl is stored in MongoDB.
In other words, my MongoDB holds a list of jobs, and my crawler workers are fed from it. Hence, I first need to update a field in Mongo, and over time this becomes very slow.
Can you help me understand why, and tell me what I should do?

                        query_update = {'id':item('id'),'gather_type':item('gather_type')}
                    new_value = {'status':'inprogress'}
                    my_feeder.mongodb_connection.updateMany(collection='items',query=query_update,new_value=new_value)