# default_exp fdic

Todo:

  • Wrap as Function
sql = """ with tbl AS ( select (sum( case when csa_present then 1 else 0 end)::numeric * 1000 )/the_pop as result, a.csa from vital_signs.match_csas_and_bc_by_geom(' economy.banks_2017', 'gid', 'the_geom') a left join economy.banks_2017 b on a.gid = b.gid group by a.csa, the_pop ) select * from tbl where 1=1 """

What's Inside?:

Indicators Used

  • ❌ 149 - banks - (banks) Number of banks per 1,000 residents
! pip install -U -q PyDrive
! pip install geopy
! pip install geopandas
! pip install geoplot
! apt install libspatialindex-dev
! pip install rtree
! pip install dataplay

%%capture
# These imports will handle everything
import os
import sys
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import geopandas as gpd
from geopandas import GeoDataFrame
import psycopg2
import pyproj
from pyproj import Proj, transform  # conda install -c conda-forge proj4
from shapely.geometry import Point
from shapely import wkb
from shapely.wkt import loads
# https://pypi.org/project/geopy/
from geopy.geocoders import Nominatim

# In case the file is KML, enable support
import fiona
fiona.drvsupport.supported_drivers['kml'] = 'rw'
fiona.drvsupport.supported_drivers['KML'] = 'rw'

from IPython.display import clear_output
clear_output(wait=True)

import ipywidgets as widgets
from ipywidgets import interact, interact_manual

TPOP CSA and Baltimore

Get CSAs

csa = "https://services1.arcgis.com/mVFRs7NF4iFitgbY/ArcGIS/rest/services/Tpop/FeatureServer/0/query?where=1%3D1&outFields=*&returnGeometry=true&f=pgeojson"
csa = gpd.read_file(csa)
csa.head(1)

Get Baltimore City

url2 = "https://services1.arcgis.com/mVFRs7NF4iFitgbY/ArcGIS/rest/services/Tpop/FeatureServer/1/query?where=1%3D1&outFields=*&returnGeometry=true&f=pgeojson" csa2 = gpd.read_file(url2); csa2['CSA2010'] = csa2['City_1'] csa2['OBJECTID'] = 56 csa2 = csa2.drop(columns=['City_1']) csa2.head()

Append Baltimore City (Bcity) to the CSA dataframe. It goes at the bottom of the dataframe because the point-in-polygon match returns only the last matching CSA label; a quick ordering check follows the cell below.

# DataFrame.append is removed in newer pandas; concat with this ordering keeps Baltimore City at the bottom.
csa = pd.concat([csa, csa2], ignore_index=True)
csa.head(3)
csa.tail(3)
csa.head()
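Since later cells rely on the city-wide record sitting in the last row (index 55), here is a quick sanity check, assuming the Tpop layer's City_1 value is literally 'Baltimore City':

```python
# Illustrative check: the appended city-wide record should be the final row.
assert csa.iloc[-1]['CSA2010'] == 'Baltimore City'
assert csa.shape[0] == 56  # 55 CSAs plus the city-wide record
```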

Process Banks

lsyear = "19"original = gpd.read_file("BCityBanks_20"+year+"_CSACity.shp");original.rename(columns={ 'CSA':'CSA2010', 'BaltCity':'InBaltimore'}, inplace=True) df = original[ original['CSA2010'].notnull() | original['InBaltimore'].notnull() ]print('After filtering records where a CSA or Baltimore geo-code match Exists') print( 'All rows Before Filter: ', original.shape[0] ) # rows, columns print( '# w BCity.isnull: ', df.InBaltimore.isnull().sum() ); bmorow = df[ df.CSA2010.isnull() ].shape[0] print( '# w CSA2010.isnull: ', bmorow ); csarow = df[ df.CSA2010.notnull() ].shape[0] print( '# w CSA2010.notnull: ', csarow ); print( '# rows After Filter: ', df.shape[0],'==',csarow,'+',bmorow,'==', csarow + bmorow); # add baltimore city df.CSA2010 = df.CSA2010.fillna('Baltimore City')banks = df.copy() banks.head(1)

Create Banks

#export
banks = gpd.read_file('BCityBanks_2019_CSACity.shp')
banks = banks.fillna('Baltimore City')
banks['count'] = 1
banks = banks[['CSA','count']]
banks = banks.groupby('CSA').sum(numeric_only=True)

# Make sure ALL CSAs and Baltimore City are included, even those with zero banks
banks = csa[['CSA2010','tpop10']].merge(banks, left_on='CSA2010', right_on='CSA', how='outer')

# Update the city-wide (Baltimore City) row with the total count
banks.at[55,'count'] = banks['count'].sum()
banks.head(3)
banks.tail(3)

#export
# Create the indicator: banks per 1,000 residents
banks['banks19'] = banks['count'] * 1000 / banks['tpop10']
banks.to_csv('149-banks-19.csv', index=False)
banks.head(68)
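As a worked example of the rate computed above (the numbers are invented for illustration): a CSA with 12 bank branches and a 2010 population of 15,000 comes out to 0.8 banks per 1,000 residents.

```python
# Illustrative only: not taken from the real data
count, tpop10 = 12, 15000
banks19 = count * 1000 / tpop10  # 0.8 banks per 1,000 residents
```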
