javascript – Fetching data from endpoint, retrieving coordinates, then merging into an output

I’m in no way a skilled developer (or even a developer at that!), but I wanted to have a go at writing a script that fetches one endpoint’s data, extracts some of it, sends it off to be geocoded, then saves both the original data (with geocodes added) and the geocoded data to file. On subsequent runs, it looks up the geocoded data on file to check if anything matches and pulls from there, and only triggers the fetch request again for anything missing.

I think it works – at least it’s been working from what I can tell – but I’d love a review (or improvements, if that’s your thing) 🙂

#!/usr/bin/env node

'use strict';

//
// MARK: - modules
//
const fs = require('fs');
const fetch = require('node-fetch');


//
// MARK: create the variables / constants
//

// get the command line arguments
const args = (function(argv) {

    // remove `node` and the script name
    argv = argv.slice(2);

    // returned object
    var args = {};
    var argName, argValue;

    // loop through each argument
    argv.forEach(function(arg) {

        // separate the argument, for a key/value return
        arg = arg.split('=');

        // retrieve the argument name
        argName = arg[0];

        // remove "--" or "-"
        if (argName.indexOf('-') === 0) {
            argName = argName.slice(
                argName.slice(0, 2).lastIndexOf('-') + 1
            );
        }

        // use the supplied value (coerced to a number when it parses as
        // one), or initialise bare flags to "true"
        argValue = (arg.length === 2)
            ? (parseFloat(arg[1]).toString() === arg[1] ? +arg[1] : arg[1])
            : true;

        // finally add the argument to the args set
        args[argName] = argValue;
    });

    return args;
})(process.argv);
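
// e.g. `node script.js --api=KEY --limit=10 --verbose` is parsed into
// { api: 'KEY', limit: 10, verbose: true }: numeric strings are coerced
// to numbers, and bare flags are initialised to `true`.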



// api key checks
const apiKey = function() {

    // api not declared
    if (!args.api) {
        return 1;
    }

    // declared but empty
    if (args.api === true) {
        return 2;
    }

    // all good
    if (args.api && args.api !== true) {
        return args.api;
    }

}();



//
// MARK: - check the files and directories we need
//
const req = (() => {

    // where we store things
    const directory = './docs';
    const datavic = `${ directory }/datavic.json`;
    const database = `./database.json`;

    // checks and creation
    if (!fs.existsSync(directory)) {
        fs.mkdirSync(directory);
    }

    // array of items
    return {
        dir: directory,
        api: datavic,
        db: database
    }
})();



//
// MARK: - the script initialiser
//
function init() {
    console.time();
    fetchExposures();
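    // NB: fetchExposures() is asynchronous, so the timer below stops before
    // the fetch completes; it only measures the synchronous setup work.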
    console.timeEnd();
}



//
// MARK: - 1. fetch the data from datavic
//
function fetchExposures() {

    // api check
    if (apiKey === 1) {
        return console.error(
            '(x) Exiting: API argument not declared!'
        );
    }
    if (apiKey === 2) {
        return console.error(
            '(x) Exiting: API key not provided!'
        );
    }

    // url: Data Victoria API
    const url = new URL(
        'https://discover.data.vic.gov.au/api/3/action/datastore_search'
    );

    // url: parameters
    url.searchParams.append(
        'resource_id', 'afb52611-6061-4a2b-9110-74c920bede77'
    );

    url.searchParams.append(
        'limit', '10000'
    );

    // fetch the data
    fetch(url)
        .then(res => res.json())
        .then(async data => {

            // strip problem characters from the data
            const valid = await validateData(data);

            // check the data against the database
            const database = await checkDatabase(valid);

            // fetch the coordinates
            const coordinates = await fetchCoordinates(database);

            // add the coordinates to the api data
            const api = await addCoordinates(data, coordinates);

            // write items to file
            await writeFiles(api, coordinates);
        })
        .catch(err => {
            console.error(`(x) Error: ${ err }`);
        });
}



//
// MARK: - 2. remove the bad characters
//
function validateData(input) {

    // note: `\h` is not a valid escape in JavaScript regexes (it matches a
    // literal "h" and would corrupt the data), so strip form feeds instead
    const sanitised = JSON.stringify(input, null, 2)
        .replace(/\t/g, '')
        .replace(/\r/g, '')
        .replace(/\n/g, '')
        .replace(/\v/g, '')
        .replace(/\f/g, '');

    return JSON.parse(sanitised);
}



//
// MARK: - 3. check if any of the results are in the database
//
function checkDatabase(input) {

    // 3a. exclude any that have a null address or postcode
    const inputNoNull = input.result.records.filter(item => {
        if (!(item.Site_streetaddress === null ||
                item.Site_postcode === null)) {
            return item
        }
    })

    // -- sort by ID (before `_id` is dropped by the projection below)
    .sort((a, b) => {
        return a._id - b._id;
    })

    // -- select only certain fields
    .map(item => ({
        Suburb: item.Suburb,
        Site_streetaddress: item.Site_streetaddress,
        Site_state: item.Site_state,
        Site_postcode: item.Site_postcode
    }));

    // -- get the unique data
    const data = [...new Set(
        inputNoNull.map(item1 =>
            item1.Suburb +
            item1.Site_streetaddress +
            item1.Site_state +
            item1.Site_postcode
        )
    )].map(
        item1 => inputNoNull.find(
            item2 =>
            item2.Suburb +
            item2.Site_streetaddress +
            item2.Site_state +
            item2.Site_postcode ==
            item1
        )
    );

    // -- read the database (it won't exist on the first run)
    const db = fs.existsSync(req.db) ? fs.readFileSync(req.db) : null;
    const database = (db && db.length) ? JSON.parse(db) : [];

    // append new items, keyed on the full address so duplicates collapse
    const mergeJSON = (file1, file2) =>
        Object.values([...file1, ...file2]
            .reduce((left, right) => {
                const key = `${right.Suburb} ${right.Site_streetaddress} ${right.Site_state} ${right.Site_postcode}`;
                left[key] = left[key] || right;

                return left;
            }, {})
        );

    // return it
    return mergeJSON(database, data);
}



//
// MARK: - 4. loop all items in database, fetch the coordinates
//
async function fetchCoordinates(input) {

    let geocodedSites = [];

    // loop over the locations (sequentially: each iteration awaits,
    // so the geocoder only ever sees one request at a time)
    for (let x = 0; x < input.length; x++) {
        try {
            const result = await getGeocode(input[x]);
            geocodedSites.push(result);
        } catch (err) {
            console.log(`(x) Error: ${ err }`);
        }
    }

    // return it
    return geocodedSites;
}



//
// MARK: - 5. re-check the results are in the database
//
function addCoordinates(data, coordinates) {

    const dataapi = data.result.records;

    let arr = [];

    // loop over the locations
    for (let x = 0; x < dataapi.length; x++) {
        coordinates.forEach(item2 => {

            if (dataapi[x].Suburb === item2.Suburb &&
                dataapi[x].Site_streetaddress === item2.Site_streetaddress &&
                dataapi[x].Site_state === item2.Site_state &&
                dataapi[x].Site_postcode === item2.Site_postcode
            ) {
                // -- append the coordinates to the item
                const fulldata = {
                    ...dataapi[x],
                    latitude: item2.latitude,
                    longitude: item2.longitude
                };
                arr.push(fulldata);
            }
        });
    }

    // return it
    return arr;
}



//
// MARK: - 6. write files to system
//
function writeFiles(file1, file2) {

    // stringify the inputs
    const stringFile1 = JSON.stringify(file1, undefined, 2);
    const stringFile2 = JSON.stringify(file2, undefined, 2);

    // write them to disk
    fs.writeFileSync(req.api, stringFile1);
    fs.writeFileSync(req.db, stringFile2);
}



//
// MARK: - function: get the coordinates
//
async function getGeocode(site) {

    // build the address
    const query = `${site.Site_streetaddress} ${site.Suburb} ${site.Site_postcode} ${site.Site_state} Australia`;


    // 1. skip any with existing coordinates
    if ((site.latitude && site.longitude) || site.skip) {
        console.error(`(✔) Skipping: ${ query }`);
        return Promise.resolve(site);
    }

    // 2. fetch only missing items
    console.log(`(⦿) Geocoding: ${ query }`);

    // url: Data Victoria API
    const url = new URL(
        'http://api.positionstack.com/v1/forward'
    );

    // url: parameters
    url.searchParams.append(
        'access_key', apiKey
    );

    url.searchParams.append(
        'region', 'Victoria'
    );

    url.searchParams.append(
        'country', 'AU'
    );

    url.searchParams.append(
        'limit', '1'
    );

    // build the search query
    url.searchParams.append(
        'query', query
    )

    // fetch the data
    return fetch(url)
        .then(res => res.json())
        .then(data => {

            const latitude = data.data[0].latitude;
            const longitude = data.data[0].longitude;
            const label = data.data[0].label;
            const confidence = data.data[0].confidence;

            return {
                ...site,
                confidence,
                latitude,
                longitude,
                label
            };
        })
        .catch(err => {
            console.log(`    (x) Failed: ${ query }`);
            return {
                ...site,
                skip: true
            };
        });
}

//
// MARK: - success: if we got to here
//
init();
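
For reference, the geocoding key is passed via the `--api` flag, so the script is run with something like `node ./script.js --api=YOUR_POSITIONSTACK_KEY` (the file name here is just a placeholder).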

computational geometry – Efficient 2d interval merging product

Suppose I have two tables of 2d intervals with values attributed to each interval.

(d1_start, d1_end), (d2_start, d2_end), value
 1, 3,              2, 5,               0.4
 3, 9,              2, 5,               0.3
...

It is guaranteed that within a table no two rectangles (2d intervals) intersect.

Is there an efficient way to merge the two tables, keeping the minimum value where intervals overlap and leaving values unchanged otherwise? A partial overlap should produce new, separate records: the non-overlapping parts keep the old value, and the overlapping part keeps the minimum value.
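
To pin down that rule, here is a minimal brute-force sketch (mine, not from the question): cut both tables along every interval breakpoint, paint each elementary cell with its table's value, and take the minimum wherever a cell is painted by both tables. It only illustrates the semantics; it does not answer the efficiency question, and it does not re-coalesce the fragments it creates.

from bisect import bisect_left

def merge_tables(a, b):
    """a, b: lists of (d1_start, d1_end, d2_start, d2_end, value) rows."""
    xs = sorted({r[0] for r in a + b} | {r[1] for r in a + b})
    ys = sorted({r[2] for r in a + b} | {r[3] for r in a + b})

    def paint(table):
        grid = {}
        for x1, x2, y1, y2, v in table:
            for i in range(bisect_left(xs, x1), bisect_left(xs, x2)):
                for j in range(bisect_left(ys, y1), bisect_left(ys, y2)):
                    grid[i, j] = v  # within one table, intervals never overlap
        return grid

    ga, gb = paint(a), paint(b)
    out = []
    for i, j in sorted(ga.keys() | gb.keys()):
        vals = [g[i, j] for g in (ga, gb) if (i, j) in g]
        out.append((xs[i], xs[i + 1], ys[j], ys[j + 1], min(vals)))
    return out

# the two sample rows, merged with a second table that overlaps both
print(merge_tables(
    [(1, 3, 2, 5, 0.4), (3, 9, 2, 5, 0.3)],
    [(2, 4, 2, 5, 0.1)],
))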

Merging two B-trees in O(h)

Given two B-trees A and B (both of order m) such that for every a in A and b in B, a > b,
and such that the number of elements in A is larger than the number of elements in B:

how can I merge them into one B-tree of order m in O(h) time,
where h is the height of the tree A?


notebooks – Setting automatic dividers when merging groups

Imagine I have my code split into multiple sections, like so

[screenshot: code split into multiple sections of cells]

Suppose I now select all cells and merge them, getting

[screenshot: all the cells merged into a single cell]

Is it possible to automatically set a divider between the different code sections? I’m looking for something like

[screenshot: the merged cell with dividers between the original code sections]

Code to apply customizable dividers to merged tagged cells would be ideal. Any ideas?

algorithms – Merging all adjacent and overlapping rectangles in a grid to bigger rectangles

I have an n×m rectangular grid of cells, and a set R of rectangles within this grid. Each rectangle is a subset of the cells. (Alternatively, you can think of them as axis-aligned rectangles where each of the four corners has integer coordinates.) Some of these rectangles overlap and some of them are adjacent. I want to merge adjacent rectangles into bigger rectangles where possible, and remove overlaps.

So my output set should contain bigger non-overlapping rectangles instead of small adjacent or overlapping ones, and the number of rectangles in the output set should be minimized. The output set need not be a subset of the input set.
Is there any approximate or exact algorithm for this problem?
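
As a baseline, here is a greedy heuristic sketch (not an exact algorithm): rasterise the rectangles into a boolean mask over the grid, then repeatedly carve out a maximal rectangle starting from each still-covered cell. It removes all overlaps and merges adjacent rectangles, but it does not guarantee the minimum number of output rectangles.

def merge_rectangles(n, m, rects):
    """rects: list of (r1, c1, r2, c2) with inclusive cell coordinates."""
    covered = [[False] * m for _ in range(n)]
    for r1, c1, r2, c2 in rects:
        for r in range(r1, r2 + 1):
            for c in range(c1, c2 + 1):
                covered[r][c] = True

    out = []
    for r in range(n):
        for c in range(m):
            if not covered[r][c]:
                continue
            # grow the rectangle rightwards along this row...
            c2 = c
            while c2 + 1 < m and covered[r][c2 + 1]:
                c2 += 1
            # ...then downwards while the whole row segment stays covered
            r2 = r
            while r2 + 1 < n and all(covered[r2 + 1][k] for k in range(c, c2 + 1)):
                r2 += 1
            out.append((r, c, r2, c2))
            for rr in range(r, r2 + 1):  # mark the carved cells as done
                for cc in range(c, c2 + 1):
                    covered[rr][cc] = False
    return out

# two overlapping rectangles plus an adjacent one merge into a single block
print(merge_rectangles(4, 4, [(0, 0, 1, 1), (0, 1, 1, 2), (0, 3, 1, 3)]))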

git – Visual Studio Code not syncing with GitHub after merging branch

I merged a branch into the main branch in VS Code, which is synced with GitHub. The branch merged successfully without pushing uncommitted changes, as expected, but then I realized the branch needed to be deleted. So I committed those changes, merged the branch into main again, then deleted it. Now GitHub has the correct files for the main branch, but VS Code does not… what did I do?

PS I’m new to using GitHub

Basically, here’s one of my functions in VSCode:

def valid(play, current_card):
  if play[0] != current_card[0]:
    if play[1] != current_card[1]:
      return False
  else:
    return True

And this is what it should be (as it is in GitHub):

def valid(play, current_card):
  # if valid play
  if (play[0] in ("R", "G", "B", "Y")):
    if (len(play) == 2) and (play[0] == current_card[0] or play[2:] == current_card[2:]):
      # if regular card
      if (int(play[1]) < 9 and int(play[1]) > 0):
        return True
      elif play[1] == "X":
        return True
    elif len(play) == 3 and (play[1:2] in ("+2", "RV")):
      return True
  elif play == "W" or play == "W+4":
    return True
  return False

def find_card(card, hand):
  for i in range(len(hand)):
    if card in hand[i]:
      return i

How can I fix this?
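
One typical recovery, assuming GitHub's main is the version you want and there is no local work to keep: switch to main with `git checkout main`, then run `git pull origin main`; if the histories have diverged, `git fetch origin` followed by `git reset --hard origin/main` will force the local branch to match the remote (note that a hard reset discards local changes, so only do this if nothing local needs keeping).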

How to prevent iOS merging contacts with the same name


virtualhost – Apache sort order and preventing merging using the location directives in virtual server

I have two locations, one of which is a subset of the other. I would like the match to be either/or. However, on the more specific path, it appears to match both and merge the potential WebLogic destinations. The desired behavior: if the request matches /alpha/beta, go to location X; if it doesn’t match /alpha/beta but still has the root /alpha, go to location Y.

<VirtualHost *:10000>
    ServerAdmin root@localhost
    Options -Indexes -ExecCGI -FollowSymLinks -Includes -Multiviews -IncludesNoExec -SymLinksIfOwnerMatch
    ServerName test-ws.amberroad.com

    DebugConfigInfo OFF
    Idempotent OFF
    KeepAliveEnabled ON
    WLIOTimeoutSecs 720
    ConnectTimeoutSecs 720
    WLSocketTimeoutSecs 720
    ConnectRetrySecs 0

    <Location /alpha>
        WLSRequest On
        WebLogicHost 10.10.10.10
        WebLogicPort 10002
    </Location>

    <Location /alpha/beta>
        WLSRequest On
        WebLogicCluster 10.10.10.10:10001,10.10.10.10:10000
    </Location>

    ErrorLog "|/opt/freeware/sbin/rotatelogs /wslog/error_log.10000_%a 86400"
    CustomLog "|/opt/freeware/sbin/rotatelogs /wslog/access_log.10000_%a 86400" common
</VirtualHost>

python – Merging pages together into an append log

I’m scraping data from a paginated list on a website to be sent out as notifications.
For the most part we’ll only need to scrape the first page and handle additions, deletions and duplicates.

  • Additions:
    All additions will be logged to an append only log.
    I also want notifications to be sent to me, and others.
    As such, I’m also returning the new data so the calling code can integrate with the other systems.

  • Deletions:
    Whilst deletions are very rare, I’d like the code to be prepared for them.
    Deletions just mean we assume the side with more data is correct,
    so programmatically they’re handled in a similar way to additions.

  • Duplicates:
    Duplicate entries between the log we have and the page we’ve freshly scraped are very common.
    We don’t want duplicates to be added to the log, unless both sides have a duplicate.

For example, say we have the following items in our log and in a fresh scrape:

local:  C C D E
scrape: A B C C E

We want to have the output to be:

A B C C D E

Notice how A and B were added, D was deleted, and we normalize to two Cs (not four) and one E (not two).
And how all the data is in the same order.

Additionally, the list has a couple of useful properties:

  • New items are always prepended to the start of the list.
    The code I’ve written can work well with both prepended and appended lists.
    By travelling from ‘head’ to ‘tail’, any addition to the data will mean we get a duplicate value on the next page.

    For example. If the list has two pages, and after we scrape the first page a new item is added:

    page 1  | page 2
    F E D C | B A
    

    We scrape FEDC, and a new item G is added to the list.

    page 1  | page 2
    G F E D | C B A
    

    You can see we scrape C again. However, if we had travelled against the list, we could have missed out on C.

  • Items are highly unlikely to be deleted.
    Deleting items causes the list to shift in the opposite direction.
    So we run into the issue we had before by travelling against the list.

    page 1  | page 2
    F E D C | B A
    

    We scrape FEDC, and the item E is removed.

    page 1  | page 2
    F D C B | A
    

    As you can see we can lose out on B.

Say we scrape the entire paginated list (not what the code does by default).
Each page with no overlap with any other page will be stored as-is in NotificationLog.values.
Whilst we could assume each page carries on from the next, the code doesn’t by default,
because additions or deletions to the paginated list can shift items between pages, depending on how we’re travelling through the data.
I decided to err on the side of forcing the consumer to explicitly assert that pages are connected – by including an item from the previous page.
As such, we can scrape the list in either direction without much care for how the pages have been mutated.
If we wait until an addition or deletion happens, we can then scrape every page a second time, ensuring we have any and all values between two pages.
And so everything will connect together through the overlap between the pages.

However, appends are very common, and deletes are very, very rare.
So I’ve decided (outside of the class) to lie and say I got one value of the previous page as part of the scrape.
That way we only need one parse of each page.
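
To make the ‘lie’ concrete, a rough sketch of the calling code (`scrape_page` is a hypothetical stand-in for the real scraper, and exactly which logged row to borrow depends on the direction of travel):

page = list(scrape_page(1))      # hypothetical scraper: newest-first rows
if log.values:
    # pretend one value we already logged was scraped as part of this page,
    # so the page is guaranteed to overlap an existing memory and merges in
    page.append(log.values[0][-1])
added = log.add_values(page)     # the rows actually written, or None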

I’ve written the following code to store the pages in memory and then log to a file.
I think the code is really complicated, and could be cleaned up.

Note: the code is about 99.9% stable, but isn’t 100% stable.
In the test test_zip_values__same_tail, provided below, the output is tuplize((1, 2, 4, 5, 6, 3, 7), suffix=(0,)) rather than a stable tuplize((1, 2, 3, 4, 5, 6, 7), suffix=(0,)).
The situation is very, very rare, so I’ve skipped implementing a secondary bi-stable merge for now.
Please skip over it.

from __future__ import annotations

import os
from typing import IO, Iterator, Optional, TypeVar

from .log import LogValues

TTuple = TypeVar("TTuple", bound=tuple)  # type: ignore
AValues = list[list[TTuple]]


class NotificationLog(LogValues[TTuple]):
    @staticmethod
    def _read_last_line(file: IO[bytes]) -> bytes:
        file.seek(-2, os.SEEK_END)
        while (
            file.read(1) != b"\n"
            and file.seek(-2, os.SEEK_CUR)
        ):
            pass
        return file.readline()

    @classmethod
    def _read_values(cls, bytes: IO[bytes], encoding: str = "utf-8") -> AValues[TTuple]:
        ret = cls._read_last_line(bytes).decode(encoding=encoding).rstrip("\n")
        if not ret:
            return []
        else:
            return [
                [
                    tuple(ret.split(","))  # type: ignore
                ]
            ]

    def add_values(self, values: list[TTuple], index: int = -1) -> Optional[list[TTuple]]:
        """
        Add values into the log.

        The values should be a page from a paginated list.
        Any values added to the log will be returned,
        otherwise ``None`` will be returned.

        ..  note::

            For best performance the paginated list should:

            -   prepend new items to the start of the list, and
            -   have low volatility - few/no deletes.

            And you should add one value from the previous page in ``values``.
        """
        values = list(self.zip_values(iter(values), iter(self.values), index))
        had_values = bool(self.values)
        self.values = list(self._add_values(values, iter(self.values)))
        if values is not self.values[-1]:
            return None
        if had_values:
            values = values[-2::-1]
        for line in values:
            print(*line, sep=",", file=self.file)
        return values

    def _add_values(self, adds: list[TTuple], memories: Iterator[list[TTuple]], index: int = -1) -> Iterator[list[TTuple]]:
        """
        Add a new memory into the memories.

        ``adds`` will have been run through :meth:`zip_values` beforehand.
        As such, all existing memories which overlapped with ``adds`` will be included in ``adds``.
        We need to remove all memories which overlap ``adds``,
        and insert ``adds`` in the correct position.
        """
        add = adds[0]
        try:
            mems = next(memories)
            while add[index] < mems[0][index]:
                yield mems
                mems = next(memories)
            while mems[0] in adds:
                mems = next(memories)
        except StopIteration:
            yield adds
        else:
            yield adds
            yield mems
            yield from memories

    @staticmethod
    def zip_values(adds: Iterator[TTuple], memories: Iterator[list[TTuple]], index: int = -1) -> Iterator[TTuple]:
        """
        Merge new values and pages together.

        Memories are descendingly ordered pages of values.
        The ``index``, defaulting to the last, is used to determine the order.
        The pages in memories are pages returned from a paginated list.
        Many sites use paginated lists, such as Google.
        Sometimes sites expose an 'infinite scroll',
        where the site loads the next page automatically through JavaScript.

        Adds should be an ordered page to add to the log.
        If there are no overlaps between the ``adds`` and ``memories``
        then a new memory is added.
        Effectively adding the page as-is into ``memories`` in the correct place.

        Once ``adds`` reaches the final memory (the last value in the log),
        any additional data is ignored.
        The log should be opened in append mode and so we can only write to the end.
        :class:`NotificationLog` is not suitable for lists with random insertions.

        In prepended and low volatility lists, to merge pages into the log ASAP,
        always include one value from the previous page.
        The code is designed to correctly handle deleted data,
        from both ``adds`` and ``memories``.
        As such the previous value can be the first, second, ..., or last value.

        ..  warning::

            If:

            -   new items to the list are **appended** to the **end** of the list, or
            -   the list is highly susceptible to item deletion,

            **do not** include a value from the previous page.
            Doing so can cause _data loss_ and will require an additional tool to recover from.

        """
        def take_same_index(start: TTuple, it: Iterator[TTuple]) -> tuple[Optional[TTuple], list[TTuple]]:
            values = [start]
            for value in it:
                if value[index] != start[index]:
                    return value, values
                else:
                    values.append(value)
            return None, values

        add: Optional[TTuple]
        mem: Optional[TTuple]
        try:
            add = next(adds)
        except StopIteration:
            return
        while True:
            try:
                mems_ = next(memories)
            except StopIteration:
                yield add
                yield from adds
                return
            if mems_[-1][index] <= add[index]:
                break
        mems = iter(mems_)
        while True:
            try:
                mem = next(mems)
                while mem[index] < add[index]:
                    yield add
                    add = next(adds)
            except StopIteration:
                return
            try:
                while True:
                    if mem[index] == add[index]:
                        if mem == add:
                            yield add
                            try:
                                add = next(adds)
                            except StopIteration:
                                add = None
                                raise
                            mem = next(mems)
                        else:
                            add, _adds = take_same_index(add, adds)
                            mem, _mems = take_same_index(mem, mems)
                            # TODO: Replace with a bi-stable algorithm
                            # Probably never going to need a bi-stable algorithm
                            yield from _mems
                            for _add in _adds:
                                if _add not in _mems:
                                    yield _add
                            if mem is None:
                                return
                            if add is None:
                                yield mem
                                raise StopIteration()
                    elif mem[index] < add[index]:
                        yield add
                        try:
                            add = next(adds)
                        except StopIteration:
                            add = None
                            yield mem
                            raise
                    else:
                        yield mem
                        mem = next(mems)
            except StopIteration:
                yield from mems
                if add is None:
                    return
            try:
                mems = iter(next(memories))
            except StopIteration:
                return
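
For reference, this is roughly how the class is driven (a minimal sketch; the constructor and from_streams calls mirror the test fixtures below):

import io

from loglet import NotificationLog

# resume from an existing log: a text stream to append to, plus the raw
# bytes so the newest line can be read back
log = NotificationLog.from_streams(
    io.StringIO("1,081041\n5,081324\n5,081327\n"),
    io.BytesIO(b"1,081041\n5,081324\n5,081327\n"),
)

# or start an empty log with no prior pages
empty = NotificationLog(io.StringIO(""), [])

# merge one scraped page; the rows actually written are returned
# (None when nothing new was added), ready to fan out as notifications
added = log.add_values([
    ("1", "081041"), ("5", "081324"), ("5", "081327"),
    ("5", "082047"), ("5", "082312"),
])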

I have a fairly comprehensive test suite.

import io
from typing import Any

import pytest

from loglet import NotificationLog

VALUES = (
    ("1", "081041"),
    ("5", "081324"),
    ("5", "081327"),
)
STRING = "".join(
    ",".join(values) + "n"
    for values in VALUES
)
BYTES = STRING.encode('utf-8')


@pytest.fixture
def log():
    return NotificationLog.from_streams(
        io.StringIO(STRING),
        io.BytesIO(BYTES),
    )


@pytest.fixture
def log_empty():
    return NotificationLog(
        io.StringIO(""),
        [],
    )


def test_read_last_line__empty_log():
    output = NotificationLog._read_last_line(io.BytesIO(b""))
    assert output == b""


def test_read_last_line__one_no_newline():
    output = NotificationLog._read_last_line(io.BytesIO(b"foo"))
    assert output == b"foo"


def test_read_last_line__one_log():
    output = NotificationLog._read_last_line(io.BytesIO(b"foo\n"))
    assert output == b"foo\n"


def test_read_last_line__two_log():
    output = NotificationLog._read_last_line(io.BytesIO(b"foo\nbar\n"))
    assert output == b"bar\n"


def test_read_values__empty_log():
    output = NotificationLog._read_values(io.BytesIO(b""))
    assert output == []


def test_read_values__empty_newline_log():
    output = NotificationLog._read_values(io.BytesIO(b"\n"))
    assert output == []


def test_read_values__one_log():
    output = NotificationLog._read_values(io.BytesIO(b"foo\n"))
    assert output == [[("foo",)]]


def test_read_values__two_log():
    output = NotificationLog._read_values(io.BytesIO(b"foo\nbar\n"))
    assert output == [[("bar",)]]


def tuplize(values, *, prefix: tuple[Any, ...] = (), suffix: tuple[Any, ...] = ()) -> list[tuple[Any, ...]]:
    try:
        return [
            prefix
            + (
                value
                if isinstance(value, tuple) else
                (value,)
            )
            + suffix
            for value in values
        ]
    except TypeError:
        return [prefix + (values,)]


def test_zip_values__empty_adds():
    output = list(NotificationLog.zip_values(
        iter(tuplize(())),
        iter((
            tuplize(3),
        )),
    ))
    assert output == tuplize(())


def test_zip_values__empty_mems():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter(()),
    ))
    assert output == tuplize(range(9, -1, -2))


def test_zip_values__basic_final_extra_add_eq():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize(3),
        )),
    ))
    assert output == tuplize((9, 7, 5, 3))


def test_zip_values__basic_final_extra_add_neq():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize(4),
        )),
    ))
    assert output == tuplize((9, 7, 5, 4))


def test_zip_values__basic_final_extra_mems_eq():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, 4, -2))),
        iter((
            tuplize((5, 3, 1)),
        )),
    ))
    assert output == tuplize((9, 7, 5, 3, 1))


def test_zip_values__basic_final_extra_mems_neq():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, 4, -2))),
        iter((
            tuplize((6, 4, 2, 0)),
        )),
    ))
    assert output == tuplize((9, 7, 6, 5, 4, 2, 0))


def test_zip_values__basic_stem_extra_add_eq():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize(3),
            tuplize(-10),
        )),
    ))
    assert output == tuplize((9, 7, 5, 3, 1))


def test_zip_values__basic_stem_extra_add_neq():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize(4),
            tuplize(-10),
        )),
    ))
    assert output == tuplize((9, 7, 5, 4, 3, 1))


def test_zip_values__basic_stem_extra_mems_eq():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, 4, -2))),
        iter((
            tuplize((5, 3, 1)),
            tuplize(-10),
        )),
    ))
    assert output == tuplize((9, 7, 5, 3, 1))


def test_zip_values__basic_stem_extra_mems_neq():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, 4, -2))),
        iter((
            tuplize((6, 4, 2, 0)),
            tuplize(-10),
        )),
    ))
    assert output == tuplize((9, 7, 6, 5, 4, 2, 0))


def test_zip_values__basic_multiple_equal():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize(7),
            tuplize(5),
            tuplize(3),
        )),
    ))
    assert output == tuplize((9, 7, 5, 3))


def test_zip_values__basic_multiple_unequal():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize(8),
            tuplize(6),
            tuplize(4),
        )),
    ))
    assert output == tuplize((9, 8, 7, 6, 5, 4))


def test_zip_values__basic_multiple_chunk_aligned():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize((7, 6, 5)),
            tuplize(4),
        )),
    ))
    assert output == tuplize((9, 7, 6, 5, 4))


def test_zip_values__basic_multiple_chunk_misaligned():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize((8, 7, 6)),
            tuplize(4),
        )),
    ))
    assert output == tuplize((9, 8, 7, 6, 5, 4))


def test_zip_values__basic_multiple_extra():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize(-10),
            tuplize(-11),
        )),
    ))
    assert output == tuplize(range(9, -1, -2))


def test_zip_values__basic_head_overlap():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize((10, 9, 8)),
            tuplize(-11),
        )),
    ))
    assert output == tuplize((10, 9, 8, 7, 5, 3, 1))


def test_zip_values__basic_head_skip():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize(20),
            tuplize((10, 9, 8)),
            tuplize(-11),
        )),
    ))
    assert output == tuplize((10, 9, 8, 7, 5, 3, 1))


def test_zip_values__basic_head_skip():
    output = list(NotificationLog.zip_values(
        iter(tuplize(range(9, -1, -2))),
        iter((
            tuplize(-11),
        )),
    ))
    assert output == tuplize((9, 7, 5, 3, 1))


def test_zip_values__same_tail():
    output = list(NotificationLog.zip_values(
        iter(
            tuplize((1, 3, 4, 5, 7), suffix=(0,)),
        ),
        iter((
            tuplize((1, 2, 4, 5, 6), suffix=(0,)),
        )),
    ))
    assert output == tuplize((1, 2, 4, 5, 6, 3, 7), suffix=(0,))


def test_zip_values__same_tail_add():
    output = list(NotificationLog.zip_values(
        iter(
            tuplize((1, 3, 4, 5, 7), suffix=(1,)),
        ),
        iter((
            tuplize((1, 2, 4, 5, 6), suffix=(1,))
            + tuplize((0), suffix=(0,)),
        )),
    ))
    assert output == (
        tuplize((1, 2, 4, 5, 6, 3, 7), suffix=(1,))
        + tuplize((0), suffix=(0,))
    )


def test_zip_values__same_mid():
    output = list(NotificationLog.zip_values(
        iter(
            tuplize((1, 3, 4, 5, 7), suffix=(1,))
            + tuplize((0), suffix=(0,)),
        ),
        iter((
            tuplize((1, 2, 4, 5, 6), suffix=(1,))
            + tuplize((0), suffix=(0,)),
        )),
    ))
    assert output == (
        tuplize((1, 2, 4, 5, 6, 3, 7), suffix=(1,))
        + tuplize((0), suffix=(0,))
    )


def test_zip_values__same_mid():
    output = list(NotificationLog.zip_values(
        iter(
            tuplize((1, 3, 4, 5, 7), suffix=(1,))
            + tuplize((0), suffix=(0,)),
        ),
        iter((
            tuplize((1, 2, 4, 5, 6), suffix=(1,))
            + tuplize((1), suffix=(0,)),
        )),
    ))
    assert output == (
        tuplize((1, 2, 4, 5, 6, 3, 7), suffix=(1,))
        + tuplize((1, 0), suffix=(0,))
    )


def test__add_values__head_mem(log: NotificationLog):
    adds = tuplize((3, 2, 1))
    mems = list(log._add_values(
        adds,
        iter((
            tuplize((9, 8, 7)),
            tuplize((6, 5, 4)),
        )),
    ))
    assert (
        mems
        == [
            tuplize((9, 8, 7)),
            tuplize((6, 5, 4)),
            tuplize((3, 2, 1)),
        ]
    )
    assert mems[-1] is adds


def test__add_values__overwrite_mem(log: NotificationLog):
    adds = tuplize((3, 2, 1))
    mems = list(log._add_values(adds, iter([adds])))
    assert mems == [adds]
    assert mems[-1] is adds


def test__add_values__overwrite_mems(log: NotificationLog):
    adds = tuplize((6, 5, 4))
    mems = list(log._add_values(
        adds,
        iter((
            tuplize((9, 8, 7)),
            tuplize((6, 5, 4)),
            tuplize((3, 2, 1)),
        )),
    ))
    assert (
        mems
        == [
            tuplize((9, 8, 7)),
            tuplize((6, 5, 4)),
            tuplize((3, 2, 1)),
        ]
    )


def test__add_values__tail_mem(log: NotificationLog):
    adds = tuplize((9, 8, 7))
    mems = list(log._add_values(
        adds,
        iter((
            tuplize((6, 5, 4)),
            tuplize((3, 2, 1)),
        )),
    ))
    assert (
        mems
        == [
            tuplize((9, 8, 7)),
            tuplize((6, 5, 4)),
            tuplize((3, 2, 1)),
        ]
    )


HEAD_BUG_VALUES = (
    ("2", "181243"),
    ("2", "181241"),
    ("5", "181215"),
    ("5", "181201"),
    ("4", "181141"),
    ("4", "181141"),
    ("1", "180951"),
    ("1", "180948"),
    ("5", "172330"),
    ("7", "171721"),
    ("5", "171437"),
    ("5", "171211"),
    ("4", "170758"),
    ("4", "170757"),
    ("5", "162042"),
    ("6", "161510"),
    ("6", "161509"),
    ("6", "161507"),
    ("6", "161506"),
    ("6", "161505"),
    ("6", "161505"),
    ("5", "161347"),
    ("5", "161346"),
    ("4", "161155"),
    ("4", "161155"),
    ("1", "160744"),
    ("5", "151239"),
    ("7", "150418"),
    ("4", "141945"),
    ("4", "141944"),
    ("5", "141632"),
    ("9", "141532"),
    ("4", "141219"),
    ("5", "141149"),
    ("8", "141115"),
    ("8", "141114"),
    ("8", "141114"),
    ("8", "141113"),
    ("8", "141113"),
    ("8", "141113"),
    ("8", "141112"),
    ("8", "141112"),
    ("5", "132307"),
    ("5", "132303"),
    ("3", "132039"),
    ("3", "132039"),
    ("3", "132039"),
    ("4", "131502"),
    ("4", "131501"),
)


def test_add_values__head_bug(log: NotificationLog):
    assert log.add_values(HEAD_BUG_VALUES) is None


_CONNECTED_VALUES = VALUES + (
    ("5", "082047"),
    ("5", "082312"),
)


def test_add_values__connected_values(log: NotificationLog):
    assert (
        log.add_values(_CONNECTED_VALUES)
        == [
            ('5', '082047'),
            ('5', '081327'),
            ('5', '081324'),
            ('1', '081041'),
        ]
    )


def test_zip_values__head_bug(log: NotificationLog):
    assert list(log.zip_values(iter(HEAD_BUG_VALUES), iter([VALUES]))) == list(HEAD_BUG_VALUES)
