Manipulating dataframes in Python

From raju

Creating a dataframe

Create a data frame from two lists

In the interpreter

>>> import pandas as pd
>>> df = pd.DataFrame({'letters': ['a', 'b', 'c', 'd'], 'numbers': [1,2,3,4]})
>>> df
  letters  numbers
0       a        1
1       b        2
2       c        3
3       d        4

In the code

% cat create_df_from_lists.py
import pandas as pd
letters = ['a', 'b', 'c']
words = ['apple', 'ball', 'cat']
df = pd.DataFrame(
    {'letter': letters,
     'word': words})

print("letters:\n", letters, "\n")
print("words:\n", words, "\n")
print("df:\n", df, "\n")

Sample run:

 % python3 -u create_df_from_lists.py
letters:
 ['a', 'b', 'c'] 

words:
 ['apple', 'ball', 'cat'] 

df:
   letter   word
0      a  apple
1      b   ball
2      c    cat

Create a dataframe from list of lists

>>> import pandas as pd
>>> data = [[0, 1, 0, 1], [1, 0, 1, 1], [0, 1, 1, 1]]
>>> df = pd.DataFrame(data)
>>> df
   0  1  2  3
0  0  1  0  1
1  1  0  1  1
2  0  1  1  1

You can also specify a list of columns for the dataframe.

>>> columns = ['a', 'b', 'c', 'd']
>>> df = pd.DataFrame(data, columns=columns)
>>> df
   a  b  c  d
0  0  1  0  1
1  1  0  1  1
2  0  1  1  1

To specify the index

>>> df = pd.DataFrame([[1,2,3,4,15,6],['1','2','3','4','F']], index=['CT','NY'])
>>> df
    0  1  2  3   4    5
CT  1  2  3  4  15  6.0
NY  1  2  3  4   F  NaN

Create a dataframe from a list of dictionaries

>>> import pandas as pd
>>> d = [{'points': 50, 'time': '5:00', 'year': 2010}, 
... {'points': 25, 'time': '6:00', 'month': "february"}, 
... {'points':90, 'time': '9:00', 'month': 'january'}, 
... {'points_h1':20, 'month': 'june'}]
>>> df = pd.DataFrame(d)
>>> print(df)
      month  points  points_h1  time    year
0       NaN    50.0        NaN  5:00  2010.0
1  february    25.0        NaN  6:00     NaN
2   january    90.0        NaN  9:00     NaN
3      june     NaN       20.0   NaN     NaN

Ref: http://stackoverflow.com/questions/20638006/convert-list-of-dictionaries-to-dataframe

Create a dataframe from list of tuples

>>> import pandas as pd
>>> data = [(0, 1, 0, 1), (1, 0, 1, 1), (0, 1, 1, 1)]
>>> df = pd.DataFrame(data)
>>> df
   0  1  2  3
0  0  1  0  1
1  1  0  1  1
2  0  1  1  1

You can also specify a list of columns for the dataframe.

>>> columns=('a', 'b', 'c', 'd')
>>> df = pd.DataFrame(data, columns=columns)
>>> df
   a  b  c  d
0  0  1  0  1
1  1  0  1  1
2  0  1  1  1


Create a dataframe from a dictionary

>>> import pandas as pd
>>> d1 = {'key':1, 'foo':2, 'bar':3}
>>> d1
{'key': 1, 'foo': 2, 'bar': 3}
>>> pd.DataFrame([d1])
   bar  foo  key
0    3    2    1


Create a dataframe from dictionary of dictionaries

Say we have a dictionary of dictionaries of the form:

{'user':{movie:rating} }

For example,

{'Jill': {'Avenger: Age of Ultron': 7.0,
          'Django Unchained': 6.5,
          'Gone Girl': 9.0,
          'Kill the Messenger': 8.0},
 'Toby': {'Avenger: Age of Ultron': 8.5,
          'Django Unchained': 9.0,
          'Zoolander': 2.0}}

To convert it into a dataframe

>>> d1 = {'Jill': {'Django Unchained': 6.5, 'Gone Girl': 9.0, 'Kill the Messenger': 8.0, 'Avenger: Age of Ultron': 7.0},
...       'Toby': {'Django Unchained': 9.0, 'Zoolander': 2.0, 'Avenger: Age of Ultron': 8.5}}

>>> import pandas as pd
>>> pd.DataFrame(d1)
                        Jill  Toby
Avenger: Age of Ultron   7.0   8.5
Django Unchained         6.5   9.0
Gone Girl                9.0   NaN
Kill the Messenger       8.0   NaN
Zoolander                NaN   2.0

>>> pd.DataFrame.from_dict(d1)
                        Jill  Toby
Avenger: Age of Ultron   7.0   8.5
Django Unchained         6.5   9.0
Gone Girl                9.0   NaN
Kill the Messenger       8.0   NaN
Zoolander                NaN   2.0

>>> pd.DataFrame.from_dict(d1, orient='index')
      Django Unchained  Gone Girl  Kill the Messenger  Avenger: Age of Ultron  \
Jill               6.5        9.0                 8.0                     7.0
Toby               9.0        NaN                 NaN                     8.5

      Zoolander
Jill        NaN
Toby        2.0

Create a dataframe from two series

To import the series as rows

>>> import pandas as pd
>>> import numpy as np
>>> s1 = pd.Series([1, 2, 3, 5, 8, 9, 0, np.nan, 7, np.nan]); s2 = pd.Series([0, 1, 2, 3, 4, 5, 6, 8, np.nan, np.nan])
>>> pd.DataFrame([s1, s2])
     0    1    2    3    4    5    6    7    8   9
0  1.0  2.0  3.0  5.0  8.0  9.0  0.0  NaN  7.0 NaN
1  0.0  1.0  2.0  3.0  4.0  5.0  6.0  8.0  NaN NaN

To import them as columns

>>> pd.DataFrame({'s1': s1, 's2': s2})
    s1   s2
0  1.0  0.0
1  2.0  1.0
2  3.0  2.0
3  5.0  3.0
4  8.0  4.0
5  9.0  5.0
6  0.0  6.0
7  NaN  8.0
8  7.0  NaN
9  NaN  NaN

Create a dataframe with row index

$python
Python 3.6.1 |Anaconda 4.4.0 (64-bit)| (default, May 11 2017, 13:25:24) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> import pandas as pd
>>> pd.DataFrame([{'foo': 1.1, 'bar': 2.2}])
   bar  foo
0  2.2  1.1
>>> pd.DataFrame([{'foo': 1.1, 'bar': 2.2}], index=['baz'])
     bar  foo
baz  2.2  1.1

Create an empty data frame

>>> import pandas as pd
>>> a = pd.DataFrame(None)
>>> a
Empty DataFrame
Columns: []
Index: []
>>> type(a)
<class 'pandas.core.frame.DataFrame'>

To check if the dataframe is empty

>>> a.empty
True

Create an empty dataframe with column names

>>> import pandas as pd
>>> a = pd.DataFrame(columns=['x', 'y', 'z'])
>>> a
Empty DataFrame
Columns: [x, y, z]
Index: []
>>> type(a)
<class 'pandas.core.frame.DataFrame'>

Check that it is an empty dataframe

>>> a.empty
True

Create a new dataframe with fewer columns

To select the foo and bar columns from all_df dataframe and create a new dataframe called df

df = all_df[['foo', 'bar']].copy()

Create a dataframe using numpy random numbers

In [1]:
import pandas as pd
import numpy as np

df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'foo', 'foo'],
                   'B' : ['one', 'one', 'two', 'three',
                          'two', 'two', 'one', 'three'],
                   'C' : np.random.randn(8),
                   'D' : np.random.randn(8)})
df

Out[1]:
     A      B         C         D
0  foo    one  0.262732  0.089163
1  bar    one -1.591000  0.646790
2  foo    two -0.912634 -0.737303
3  bar  three  0.417209  0.311601
4  foo    two  0.034521  0.679122
5  bar    two  0.328215  1.504696
6  foo    one  0.256409 -0.366747
7  foo  three -1.647533  0.509802

read dataframe from stdin
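
A minimal sketch, assuming comma-separated data arrives on standard input (pd.read_csv accepts any file-like object, including sys.stdin; the script name below is made up):

import sys
import pandas as pd

# read whatever comes in on stdin as csv
df = pd.read_csv(sys.stdin)
print(df)

Sample usage:

 % python3 read_df_from_stdin.py < input.csv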

Creating a series

Create series with all NaN values

series2 = pd.Series(np.nan * np.ones(shape=series1.shape))

demonstrates | how to create an array of nan values using numpy functions

tags | series of size N, create a series of length N with NaN values
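
A self-contained sketch; series1 here is just a made-up example series whose shape we match:

import numpy as np
import pandas as pd

series1 = pd.Series([10, 20, 30])
# multiplying an all-ones array by nan gives an all-nan array of the same shape
series2 = pd.Series(np.nan * np.ones(shape=series1.shape))
print(series2)
# 0   NaN
# 1   NaN
# 2   NaN
# dtype: float64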

selection related

Select rows with multiple constraints

df[(df['foo'] >= FOO) & (df['bar'] <= BAR)]

where foo and bar are columns in dataframe df, and FOO and BAR are some thresholds.
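
A hypothetical worked example of the same pattern:

import pandas as pd

df = pd.DataFrame({'foo': [1, 5, 9], 'bar': [10, 20, 30]})
# keep rows where foo >= 2 and bar <= 25
print(df[(df['foo'] >= 2) & (df['bar'] <= 25)])
# keeps only row 1 (foo=5, bar=20)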

Select columns based on dtype

Use DataFrame.select_dtypes() to split up data based on column dtype. For example

def decompose_data(df):
    d = {}
    d['float'] = df.select_dtypes(include=['float'])
    d['int'] = df.select_dtypes(include=['int'])
    d['object'] = df.select_dtypes(include=['object'])
    return d

tags | get all integer columns in a dataframe

select only some columns

  • To select a subset of columns
df2 = df1[['col1', 'col2']]

Ref:- http://pandas.pydata.org/pandas-docs/stable/indexing.html

select element by row and column label

Use

df.loc[row_label, col_label]

to select an element by row and column labels. For example,

>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1,2,3], 'b': [4,5,6], 'c': [1,2,3]})
>>> df
   a  b  c
0  1  4  1
1  2  5  2
2  3  6  3
>>> df.loc[1,'b']
5

The row labels can also be strings instead of integers.

>>> df = pd.DataFrame({'a': [1,2,3], 'b': [4,5,6], 'c': [1,2,3]}, index=['x', 'y', 'z'])
>>> df
   a  b  c
x  1  4  1
y  2  5  2
z  3  6  3
>>> df.loc['y','b']
5

assignment related

append a string column in a chain

import pandas as pd
df = pd.DataFrame({'letters': ['a', 'b', 'c', 'd'], 'numbers': [1,2,3,4]})
print(df)
df2 = df.assign(alphanum='ALL')
print(df2)
print(df)

gives

  letters  numbers
0       a        1
1       b        2
2       c        3
3       d        4
  letters  numbers alphanum
0       a        1      ALL
1       b        2      ALL
2       c        3      ALL
3       d        4      ALL
  letters  numbers
0       a        1
1       b        2
2       c        3
3       d        4

Note:- the assign operation does not change the original dataframe.

append an integer column in a chain

import pandas as pd
df = pd.DataFrame({'letters': ['a', 'b', 'c', 'd'], 'numbers': [1,2,3,4]})
print(df)
df2 = df.assign(alphanum=int(float('12.2')))
print(df2)
print(df2.dtypes)

gives

  letters  numbers
0       a        1
1       b        2
2       c        3
3       d        4
  letters  numbers  alphanum
0       a        1        12
1       b        2        12
2       c        3        12
3       d        4        12
letters     object
numbers      int64
alphanum     int64
dtype: object

append multiple columns in a chain

import pandas as pd
df = pd.DataFrame({'letters': ['a', 'b', 'c', 'd'], 'numbers': [1,2,3,4]})
print(df)
df2 = df.assign(alphanum=[1.2]*df.shape[0], beta=[5,6,7,8])
print(df2)

gives

  letters  numbers
0       a        1
1       b        2
2       c        3
3       d        4
  letters  numbers  alphanum  beta
0       a        1       1.2     5
1       b        2       1.2     6
2       c        3       1.2     7
3       d        4       1.2     8

You can also use

df2 = df.assign(alphanum=1.2, beta=[5,6,7,8])
print(df2)

which gives the same result.

Series related

Find the relative difference between two series

 % python3
Python 3.5.3 (default, Jan 19 2017, 14:11:04) 
[GCC 6.3.0 20170118] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import pandas as pd
>>> import numpy as np
>>> s1 = pd.Series([1, 2, 3, 5, 8, 9, 0, np.nan, 7, np.nan]); s2 = pd.Series([0, 1, 2, 3, 4, 5, 6, 8, np.nan, np.nan])
>>> pd.DataFrame([s1, s2])
     0    1    2    3    4    5    6    7    8   9
0  1.0  2.0  3.0  5.0  8.0  9.0  0.0  NaN  7.0 NaN
1  0.0  1.0  2.0  3.0  4.0  5.0  6.0  8.0  NaN NaN
>>> s1/s2
0         inf
1    2.000000
2    1.500000
3    1.666667
4    2.000000
5    1.800000
6    0.000000
7         NaN
8         NaN
9         NaN
dtype: float64
>>> s1/s2 - 1
0         inf
1    1.000000
2    0.500000
3    0.666667
4    1.000000
5    0.800000
6   -1.000000
7         NaN
8         NaN
9         NaN
dtype: float64

Missing data

stringify nans

>>> import pandas as pd
>>> import numpy as np
>>> a = pd.Series(['foo', np.nan, 'bar', np.nan])
>>> a
0    foo
1    NaN
2    bar
3    NaN
dtype: object
>>> a.fillna('baz')
0    foo
1    baz
2    bar
3    baz
dtype: object
>>> a
0    foo
1    NaN
2    bar
3    NaN
dtype: object

replace a string with NaN

% python3
Python 3.5.3 (default, Jan 19 2017, 14:11:04) 
[GCC 6.3.0 20170118] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import pandas as pd
>>> import numpy as np
>>> a = pd.Series(['foo', np.nan, 'bar', np.nan])
>>> a.isnull()
0    False
1     True
2    False
3     True
dtype: bool
>>> b = a.fillna('baz')
>>> b
0    foo
1    baz
2    bar
3    baz
dtype: object
>>> c = b.replace('baz', np.nan, regex=True)
>>> c
0    foo
1    NaN
2    bar
3    NaN
dtype: object
>>> c.isnull()
0    False
1     True
2    False
3     True
dtype: bool

demonstrates | how to change a string to NaN

See also:- https://stackoverflow.com/questions/13445241/replacing-blank-values-white-space-with-nan-in-pandas

groupby with nan values

% python3
Python 3.5.3 (default, Jan 19 2017, 14:11:04) 
[GCC 6.3.0 20170118] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame({'a': ['1', '2', '3'], 'b': ['4', np.NaN, '6']})
>>> df
   a    b
0  1    4
1  2  NaN
2  3    6
>>> df.groupby('b').groups
{'4': Int64Index([0], dtype='int64'), '6': Int64Index([2], dtype='int64')}
>>> df.fillna(-1).groupby('b').groups
{'4': Int64Index([0], dtype='int64'), '6': Int64Index([2], dtype='int64'), -1: Int64Index([1], dtype='int64')}

Ref:- https://stackoverflow.com/questions/18429491/groupby-columns-with-nan-missing-values

Fill a dataframe row by row

Consider the dataframe

$python
Python 3.6.1 |Anaconda 4.4.0 (64-bit)| (default, May 11 2017, 13:25:24) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> import pandas as pd
>>> df = pd.DataFrame(columns=['a','b','c','d'], index=['x','y','z'])
>>> df
     a    b    c    d
x  NaN  NaN  NaN  NaN
y  NaN  NaN  NaN  NaN
z  NaN  NaN  NaN  NaN

To change the elements of an existing row

>>> df.loc['z'] = {'a':1, 'b':5, 'c':2, 'd':3}
>>> df
     a    b    c    d
x  NaN  NaN  NaN  NaN
y  NaN  NaN  NaN  NaN
z    1    5    2    3

To add a new row with an index

>>> df.loc['p'] = {'a':3, 'b':1, 'c':4, 'd':2}
>>> df
     a    b    c    d
x  NaN  NaN  NaN  NaN
y  NaN  NaN  NaN  NaN
z    1    5    2    3
p    3    1    4    2

The above method only works if you are assigning values to all the columns. For example

>>> df.loc['q'] = {'a':2, 'b':3}
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "C:\ProgramData\Continuum\Anaconda\envs\py36\lib\site-packages\pandas\core\indexing.py", line 179, in __setitem__
    self._setitem_with_indexer(indexer, value)
  File "C:\ProgramData\Continuum\Anaconda\envs\py36\lib\site-packages\pandas\core\indexing.py", line 419, in _setitem_with_indexer
    raise ValueError("cannot set a row with "
ValueError: cannot set a row with mismatched columns

To do this, use instead

>>> df.loc['q', 'a'] = 2
>>> df
     a    b    c    d
x  NaN  NaN  NaN  NaN
y  NaN  NaN  NaN  NaN
z    1    5    2    3
p    3    1    4    2
q    2  NaN  NaN  NaN

>>> df.loc['q', 'b'] = 3
>>> df
     a    b    c    d
x  NaN  NaN  NaN  NaN
y  NaN  NaN  NaN  NaN
z    1    5    2    3
p    3    1    4    2
q    2    3  NaN  NaN

pretty print dataframe without index

df.to_string(index=False)

The default is to print the index

df.to_string()


For example

>>> import pandas as pd
>>> a = [2, -3, 4]; b = ['a', 'b', 'c']; c = [7, 4, 1]
>>> df = pd.DataFrame({'a':a, 'b':b, 'c':c})
>>> print(df)
   a  b  c
0  2  a  7
1 -3  b  4
2  4  c  1
>>> print(df.to_string(index=False))
a  b  c
2  a  7
-3  b  4
4  c  1

Ref:- https://stackoverflow.com/questions/24644656/how-to-print-dataframe-without-index

print either to file or to stdout

df.to_csv(out_file if out_file else sys.stdout,
          index=False)

print all values in a pandas series
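
A minimal sketch using the standard display.max_rows option; by default pandas truncates long series when printing:

import pandas as pd

s = pd.Series(range(100))
pd.set_option('display.max_rows', None)   # print every row, no truncation
print(s)
pd.reset_option('display.max_rows')       # restore the default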

give a name to the index column

df.index.name = 'foo'

Ref:- https://stackoverflow.com/questions/18022845/pandas-index-column-title-or-name

convert int64 YYYYMMDD to datetime64

df['date'] = pd.to_datetime(df['date'], format='%Y%m%d')
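
For example, with a made-up single-column dataframe:

import pandas as pd

df = pd.DataFrame({'date': [20160201, 20160104]})
df['date'] = pd.to_datetime(df['date'], format='%Y%m%d')
print(df)
#         date
# 0 2016-02-01
# 1 2016-01-04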

extract first 8 characters of a column in a dataframe

>>> a['Date']
0    20160201.0
1    20160201.0
2    20160201.0
3    20160104.0
4    20160104.0
5    20160104.0
6    20161201.0
7    20161201.0
8    20161201.0
Name: Date, dtype: object
>>> a['Date'].str[:8]
0    20160201
1    20160201
2    20160201
3    20160104
4    20160104
5    20160104
6    20161201
7    20161201
8    20161201
Name: Date, dtype: object

Ref:- http://stackoverflow.com/questions/20970279/how-to-do-a-left-right-and-mid-of-a-string-in-a-pandas-dataframe

iterate over each column of a dataframe except one

cols = df.columns.tolist()
cols.remove('foo')
for col in cols:
    # do something with df[col]
    pass

Iterate over each month

import pandas as pd

from pandas.tseries.offsets import *
for end_dt in pd.date_range('20160110', '20160920', freq='M'):
    begin_dt = end_dt + MonthBegin(n=-1)
    end_dt_yyyymmdd = end_dt.strftime('%Y%m%d')
    begin_dt_yyyymmdd = begin_dt.strftime('%Y%m%d')
    print(begin_dt_yyyymmdd, end_dt_yyyymmdd)

will produce

20160101 20160131
20160201 20160229
20160301 20160331
20160401 20160430
20160501 20160531
20160601 20160630
20160701 20160731
20160801 20160831

Using

pd.date_range('20160110', '20160930', freq='M')

will produce

20160101 20160131
20160201 20160229
20160301 20160331
20160401 20160430
20160501 20160531
20160601 20160630
20160701 20160731
20160801 20160831
20160901 20160930

Iterate over each quarter

import pandas as pd

from pandas.tseries.offsets import *
for end_dt in pd.date_range('20140101', '20160930', freq='Q')[::-1]:
    begin_dt = end_dt + MonthBegin(n=-3)
    end_dt_yyyymmdd = end_dt.strftime('%Y%m%d')
    begin_dt_yyyymmdd = begin_dt.strftime('%Y%m%d')
    print(begin_dt_yyyymmdd, end_dt_yyyymmdd)

will produce

20160701 20160930
20160401 20160630
20160101 20160331
20151001 20151231
20150701 20150930
20150401 20150630
20150101 20150331
20141001 20141231
20140701 20140930
20140401 20140630
20140101 20140331

iterate over each row of a dataframe

To iterate over each row of a dataframe, it is better to use DataFrame.itertuples() over DataFrame.iterrows() as explained in https://stackoverflow.com/a/41022840/6305733

call a function on each row of a dataframe

If you want to call a function that takes arguments from a row of a dataframe and repeat that for each row in the dataframe, see https://stackoverflow.com/questions/39814416/pandas-apply-with-args-which-are-dataframe-row-entries . Sample code

import pandas as pd
df = pd.DataFrame({'A':[1,2,3],
                   'B':[4,5,6]})

print (df)
   A  B
0  1  4
1  2  5
2  3  6

def myfunction(B, A):
    # do something here to get the result
    result = B + A
    return result

df['C'] = df.apply(lambda x: myfunction(x.B, x.A), axis=1)
print (df)
   A  B  C
0  1  4  5
1  2  5  7
2  3  6  9

or

def myfunction(x):

    result = x.B + x.A
    # do something here to get the result
    return result

df['C'] = df.apply(myfunction, axis=1)
print (df)
   A  B  C
0  1  4  5
1  2  5  7
2  3  6  9

using itertuples

DataFrame.itertuples() can be used to iterate over DataFrame rows as namedtuples, with the index as the first element of each tuple.

>>> import pandas as pd
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]}, index=['a', 'b'])
>>> df
   col1  col2
a     1   0.1
b     2   0.2

>>> for row in df.itertuples():
...     print(row)
...
Pandas(Index='a', col1=1, col2=0.10000000000000001)
Pandas(Index='b', col1=2, col2=0.20000000000000001)

To get just the first row tuple instead of iterating over all of them

>>> g = df.itertuples()
>>> next(g, 'default')
Pandas(Index='a', col1=1, col2=0.10000000000000001)

Subsequent calls will print the next element or the default value if there are no elements left.

>>> next(g, 'default')
Pandas(Index='b', col1=2, col2=0.20000000000000001)
>>> next(g, 'default')
'default'

If the 'default' is not supplied, it will throw a StopIteration exception when there are no elements left.

>>> g = df.itertuples()
>>> next(g)
Pandas(Index='a', col1=1, col2=0.10000000000000001)
>>> next(g)
Pandas(Index='b', col1=2, col2=0.20000000000000001)
>>> next(g)
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
StopIteration

Extract some columns from a data frame and make a copy

One approach

new = old[['A', 'C', 'D']].copy()

Another approach is to use the filter function, which will create a copy by default:

new = old.filter(['A', 'B', 'D'])

The default is to filter by columns (axis=1). To filter by rows, use axis=0. For example:

new = old.filter(['foo', 'bar'], axis=0)

Ref:- https://stackoverflow.com/questions/34682828/pandas-extracting-specific-selected-columns-from-a-dataframe-to-new-dataframe

Build one column from another column

Consider

% python3
Python 3.5.3 (default, Jan 19 2017, 14:11:04) 
[GCC 6.3.0 20170118] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> a = ['kama', 'raju']
>>> a
['kama', 'raju']
>>> b = ['foo_' + i + '_bar' for i in a]
>>> b
['foo_kama_bar', 'foo_raju_bar']

To do the same in a dataframe

>>> import pandas as pd
>>> df = pd.DataFrame({'a': ['kama', 'raju']})
>>> df
      a
0  kama
1  raju
>>> df['b'] = ['foo_' + i + '_bar' for i in df['a']]
>>> df
      a             b
0  kama  foo_kama_bar
1  raju  foo_raju_bar

Conditional assignment

>>> import pandas as pd
>>> df = pd.DataFrame({'letters': ['a', 'b', 'c', 'd'], 'numbers': [1,2,3,4]})
>>> df
  letters  numbers
0       a        1
1       b        2
2       c        3
3       d        4
>>> df['new'] = 'default'
>>> df
  letters  numbers      new
0       a        1  default
1       b        2  default
2       c        3  default
3       d        4  default
>>> df.loc[df['numbers'] > 2.5, 'new'] = 'b+'
>>> df
  letters  numbers      new
0       a        1  default
1       b        2  default
2       c        3       b+
3       d        4       b+

filter rows by conditions

Capture rows by conditioning on two columns

mask_foo = (df['foo'] == 'FOO') & (df['bar'] >= 100)
mask_sec = (df['foo'] == 'SEC') & (df['bar'] >= 500)
df2 = df[ (mask_foo | mask_sec) ]
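
A hypothetical end-to-end illustration of the two-mask pattern above:

import pandas as pd

df = pd.DataFrame({'foo': ['FOO', 'SEC', 'FOO', 'SEC'],
                   'bar': [150, 600, 50, 400]})
mask_foo = (df['foo'] == 'FOO') & (df['bar'] >= 100)
mask_sec = (df['foo'] == 'SEC') & (df['bar'] >= 500)
df2 = df[mask_foo | mask_sec]
print(df2)   # keeps rows 0 and 1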

Remove null entries and everything less than 500 in column foo

mask_foo = (pd.isnull(df['foo'])) | \
           (df['foo'] < 500)
df2 = df[~mask_foo]

Show rows where values in one column are missing and values in a different column equals something

df[ (pd.isnull(df['foo'])) & (df['bar'] == 'baz')]

tags | logical, missing values

Joining two dataframes in pandas

To merge data frames a and b on column 'foo' and store the result in a new data frame, m

import pandas as pd
...
m = pd.merge(a, b, on='foo')

Inner join multiple dataframes

Sample code to merge multiple dataframes on a bunch of columns and then renaming the columns.

cols=['foo', 'bar']
df = df1\
     .merge(df2, on=cols)\
     .merge(df3, on=cols)\
     .merge(df4, on=cols)\
     .rename(columns={'foo':'alpha',
                      'bar':'beta'})

Note:- df1 is not changed when you apply a merge on it.

Inner join two dataframes

import pandas as pd
df1 = pd.DataFrame({'a': [1,1,2,2,3,3], 'b':[0,1,2,3,4,5]})
df2 = pd.DataFrame({'a': [1,2,3], 'c':[2,4,6]})

print(df1)
print(df2)
df3 = df1.merge(df2, how='inner', on=['a'])
print(df3)
   a  b
0  1  0
1  1  1
2  2  2
3  2  3
4  3  4
5  3  5

   a  c
0  1  2
1  2  4
2  3  6

   a  b  c
0  1  0  2
1  1  1  2
2  2  2  4
3  2  3  4
4  3  4  6
5  3  5  6

Inner joining two dataframes

Note that when two dataframes are inner joined, the resulting dataframe can potentially be larger than both input dataframes. This can happen when either dataframe has multiple rows with the same values in the "join" columns. For example, consider

>>> import pandas as pd
>>> df1 = pd.DataFrame([[1, 3], [1, 4]], columns=['A', 'B'])
>>> df1
   A  B
0  1  3
1  1  4
>>> df2 = pd.DataFrame([[1, 5], [1, 6]], columns=['A', 'C'])
>>> df2
   A  C
0  1  5
1  1  6
>>> df3 = pd.merge(df1, df2, on='A', how='inner')
>>> df3
   A  B  C
0  1  3  5
1  1  3  6
2  1  4  5
3  1  4  6

which shows 4 rows in df3 even though it was created by inner joining two data frames that each have 2 rows.

If the duplicates are not expected, try cleaning the data using DataFrame.drop_duplicates()

Ref:- http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.merge.html

Append two dataframes

>>> import pandas as pd
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
   A  B
0  1  2
1  3  4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df2
   A  B
0  5  6
1  7  8
>>> df_merge = df.append(df2, ignore_index=True)
>>> df_merge
   A  B
0  1  2
1  3  4
2  5  6
3  7  8

This will not modify df, df2.


By default, duplicates are not eliminated. Use drop_duplicates() for that.

>>> df3 = pd.DataFrame([[5, 6], [7, 8], [3, 4]], columns=list('AB'))
>>> df3
   A  B
0  5  6
1  7  8
2  3  4
>>> df_merge = df.append(df3, ignore_index=True)
>>> df_merge
   A  B
0  1  2
1  3  4
2  5  6
3  7  8
4  3  4
>>> df_merge = df.append(df3, ignore_index=True).drop_duplicates()
>>> df_merge
   A  B
0  1  2
1  3  4
2  5  6
3  7  8

Missing entries will be filled by NaN.

>>> df4 = pd.DataFrame([[5, 6, 7], [7, 8, 9]], columns=list('ABC'))
>>> df4
   A  B  C
0  5  6  7
1  7  8  9
>>> df_merge = df.append(df4, ignore_index=True).drop_duplicates()
>>> df_merge
   A  B    C
0  1  2  NaN
1  3  4  NaN
2  5  6  7.0
3  7  8  9.0

You can also use pd.concat()

>>> df_merge = pd.concat([df, df3], ignore_index=True).drop_duplicates()
>>> df_merge
   A  B
0  1  2
1  3  4
2  5  6
3  7  8

Append array of dataframes

master = pd.concat([pd.read_csv(file) for file in files])

Dump dataframe to a gzip file

https://github.com/KamarajuKusumanchi/sampleusage/blob/master/python/pandas/df_to_gzip.py
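
A minimal sketch using the compression argument of to_csv (the file name is made up; pandas can also infer gzip from a .gz extension):

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
df.to_csv('df.csv.gz', index=False, compression='gzip')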

get duplicates

 % python3
Python 3.5.3rc1 (default, Jan  3 2017, 04:40:57) 
[GCC 6.3.0 20161229] on linux

>>> import pandas as pd
>>> a = pd.DataFrame({'isp': ['comcast', 'telmex', 'comcast'], 'country' : ['us', 'mexico', 'us']})
>>> a
  country      isp
0      us  comcast
1  mexico   telmex
2      us  comcast

>>> a[a.duplicated()]
  country      isp
2      us  comcast

Ref:- http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.duplicated.html


drop duplicates

>>> import pandas as pd
>>> df = pd.DataFrame([[1,2], [3,4], [5, 6], [7, 8], [5, 4]], columns=list('AB'))
>>> df
   A  B
0  1  2
1  3  4
2  5  6
3  7  8
4  5  4
>>> df.drop_duplicates()
   A  B
0  1  2
1  3  4
2  5  6
3  7  8
4  5  4
>>> df.drop_duplicates(subset=["B"])
   A  B
0  1  2
1  3  4
2  5  6
3  7  8
>>> df.drop_duplicates(subset=["B"], keep='last')
   A  B
0  1  2
2  5  6
3  7  8
4  5  4
>>> df.drop_duplicates(subset=["B"], keep=False)
   A  B
0  1  2
2  5  6
3  7  8

Ref:- http://pandas.pydata.org/pandas-docs/version/0.17.1/generated/pandas.DataFrame.drop_duplicates.html

drop duplicate columns with different column names

 % python3
Python 3.5.3 (default, Jan 19 2017, 14:11:04) 
[GCC 6.3.0 20170118] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1,2,3], 'b': [4,5,6], 'c': [1,2,3]})
>>> df
   a  b  c
0  1  4  1
1  2  5  2
2  3  6  3
>>> df2 = df.T.drop_duplicates().T
>>> df2
   a  b
0  1  4
1  2  5
2  3  6

Note:- The following does not work since the duplicate columns do not have the same name.

>>> df3 = df.loc[:,~df.columns.duplicated()]
>>> df3
   a  b  c
0  1  4  1
1  2  5  2
2  3  6  3

delete columns in a dataframe

To delete one column

df = df.drop('column_name', axis=1)

where axis=0 refers to rows and axis=1 refers to columns. The default is axis=0.

To delete a column in place

df.drop('column_name', axis=1, inplace=True)

To delete a column by number, e.g. the 1st, 2nd and 4th columns:

df.drop(df.columns[[0, 1, 3]], axis=1)  # df.columns is zero-based pd.Index

common and non-common columns between two dataframes

In [1]:
import pandas as pd

In [9]:
idx1 = pd.Index(['a3', 'a2', 'a1'])
idx2 = pd.Index(['a5', 'a3', 'a4'])
print(idx1)
print(idx2)

Out [9]:
Index([u'a3', u'a2', u'a1'], dtype='object')
Index([u'a5', u'a3', u'a4'], dtype='object')

In [10]:
idx1.intersection(idx2)

Out[10]:
Index([u'a3'], dtype='object')

In [12]:
idx2.difference(idx1)

Out[12]:
Index([u'a4', u'a5'], dtype='object')

mean response when predictor is nonzero

Consider

 % cat train.csv 
y,X0,X1,X2
4.8,a,0,1
8.8,a,1,1
7.6,b,0,1
8.1,b,1,1
7.8,b,0,0
9.3,c,1,0

where y is the response variable and X0, X1 and X2 are predictors. X1 and X2 are binary predictors (meaning they can either be 0 or 1), X0 is a categorical variable that can take values a, b, c. The idea here is to find the mean of the response variable when the predictor is true. For X0, we want to find the mean for each category.

Sample code

import pandas as pd
df_raw = pd.read_csv("train.csv")
print(df_raw)
df = pd.get_dummies(df_raw)
print(df)

ycol = 'y'
xcols = ['X1', 'X2', 'X0_a', 'X0_b', 'X0_c']
response = pd.DataFrame(columns=xcols, index=['mean', 'std', 'score'])
for xcol in xcols:
    mean_value = df.loc[df[xcol] == 1, ycol].mean()
    std_value = df.loc[df[xcol] == 1, ycol].std()
    score = mean_value/std_value
    response.loc['mean', xcol] = mean_value
    response.loc['std', xcol] = std_value
    response.loc['score', xcol] = score

print(response)

Sample output

     y X0  X1  X2
0  4.8  a   0   1
1  8.8  a   1   1
2  7.6  b   0   1
3  8.1  b   1   1
4  7.8  b   0   0
5  9.3  c   1   0
     y  X1  X2  X0_a  X0_b  X0_c
0  4.8   0   1     1     0     0
1  8.8   1   1     1     0     0
2  7.6   0   1     0     1     0
3  8.1   1   1     0     1     0
4  7.8   0   0     0     1     0
5  9.3   1   0     0     0     1
             X1       X2     X0_a      X0_b X0_c
mean    8.73333    7.325      6.8   7.83333  9.3
std    0.602771  1.75381  2.82843  0.251661  NaN
score   14.4886  4.17663  2.40416   31.1265  NaN

remove blank columns in a dataframe

To remove columns where all values are missing

import pandas as pd
in_file = 'input.csv'
out_file = 'output.csv'
df = pd.read_csv(in_file, dtype=object)
mask = (df['COL_FOO'] == 'bar')
df_small = df[mask].dropna(axis=1, how='all')
df_small.to_csv(out_file, sep=',', index=False)

Combine two dataframes by appending columns

df_all = pd.concat([df1, df2], axis=1)

Ref:- "Concatenating objects" section in https://pandas.pydata.org/pandas-docs/stable/merging.html

combine and separate columns

The idea here is to combine two columns of a dataframe into a tuple column and subsequently break it into separate columns.

Consider the following dataframe

 % python3
Python 3.5.3 (default, Jan 19 2017, 14:11:04) 
[GCC 6.3.0 20170118] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import pandas as pd
>>> df = pd.DataFrame({'item':['item A', 'item B', 'item B', 'item C', 'item A'], 'value':[59, 95, 82, 40, 11]})
>>> 
>>> df
     item  value
0  item A     59
1  item B     95
2  item B     82
3  item C     40
4  item A     11

Combine the columns into a tuple and add it as another column

>>> df['item_value'] = list(zip(df.item, df.value))
>>> df
     item  value    item_value
0  item A     59  (item A, 59)
1  item B     95  (item B, 95)
2  item B     82  (item B, 82)
3  item C     40  (item C, 40)
4  item A     11  (item A, 11)

To unpack the tuple into a new dataframe

>>> df2 = df['item_value'].apply(pd.Series)
>>> df2
        0   1
0  item A  59
1  item B  95
2  item B  82
3  item C  40
4  item A  11

To rename the columns while unpacking

>>> df2 = df['item_value'].apply(pd.Series).rename(columns={0:'new_item', 1:'new_value'})
>>> df2
  new_item  new_value
0   item A         59
1   item B         95
2   item B         82
3   item C         40
4   item A         11

Until now, the original dataframe is not changed.

>>> df
     item  value    item_value
0  item A     59  (item A, 59)
1  item B     95  (item B, 95)
2  item B     82  (item B, 82)
3  item C     40  (item C, 40)
4  item A     11  (item A, 11)

To unpack the tuples into the original dataframe itself

>>> df[['new_item', 'new_value']] = df['item_value'].apply(pd.Series)
>>> df
     item  value    item_value new_item  new_value
0  item A     59  (item A, 59)   item A         59
1  item B     95  (item B, 95)   item B         95
2  item B     82  (item B, 82)   item B         82
3  item C     40  (item C, 40)   item C         40
4  item A     11  (item A, 11)   item A         11


Remove columns that are all zero
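
A one-line sketch using a boolean column mask; keep only the columns that have at least one nonzero value:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [0, 0], 'c': [0, 3]})
df2 = df.loc[:, (df != 0).any(axis=0)]   # drop columns that are all zero
print(df2)
#    a  c
# 0  1  0
# 1  2  3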


read multiple csv files into a dataframe

import pandas as pd
all_files = ("file_1.txt", "file_2.txt")
dfg = (pd.read_csv(f, sep=',', low_memory=False) for f in all_files)
df = pd.concat(dfg, ignore_index=True)

another way

import os
import pandas as pd
all_files = ("file_1.txt", "file_2.txt")
frames = []
for f in all_files:
    if not os.path.isfile(f):
        print("Warning: File", f, "does not exist. Skipping it.")
        continue
    cur_frame = pd.read_csv(f, low_memory=False)
    frames.append(cur_frame)
df = pd.concat(frames)

Ref:- http://stackoverflow.com/questions/20906474/import-multiple-csv-files-into-pandas-and-concatenate-into-one-dataframe

using to_timedelta function

>>> import pandas as pd
>>> from datetime import datetime
>>> dt = '20171103'
>>> offset = [-4, 3, 1]
>>> df = pd.DataFrame({'offset':offset})
>>> print(df)
   offset
0      -4
1       3
2       1
>>>
>>> df['date'] = datetime.strptime(dt, '%Y%m%d') + \
...              pd.to_timedelta(df['offset'], 'w')
>>> print(df)
   offset       date
0      -4 2017-10-06
1       3 2017-11-24
2       1 2017-11-10

demonstrates | timedelta operations on a column

number of dates between two time series

>>> a[['end_date', 'start_date']].head()
                  end_date           start_date
0  2016-09-30 00:00:00.000  2008-02-14 00:00:00
1  2016-09-30 00:00:00.000  2015-01-23 00:00:00
2  2016-09-30 00:00:00.000  2014-09-29 00:00:00
3  2016-09-30 00:00:00.000  2014-09-29 00:00:00
4  2016-09-30 00:00:00.000  2010-09-14 00:00:00

>>> age = (pd.to_datetime(a['end_date']) - pd.to_datetime(a['start_date']))
>>> type(age)
<class 'pandas.core.series.Series'>
>>> age.head()
0   3151 days
1    616 days
2    732 days
3    732 days
4   2208 days
dtype: timedelta64[ns]

To convert it to a number, divide by a one-day timedelta (np below is numpy, imported via import numpy as np)

>>> age = (pd.to_datetime(a['end_date']) - pd.to_datetime(a['start_date']))/np.timedelta64(1, 'D')
>>> type(age)
<class 'pandas.core.series.Series'>
>>> age.head()
0    3151.0
1     616.0
2     732.0
3     732.0
4    2208.0
dtype: float64

convert all values in a dataframe column to lowercase

 % python3
Python 3.5.3 (default, Jan 19 2017, 14:11:04) 
[GCC 6.3.0 20170118] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import pandas as pd
>>> import numpy as np
>>> df = pd.DataFrame({'a':['K', 'a', 'M', np.nan, 'A'], 'b': ['R', 'a', 'J', 'u', np.nan]})
>>> df
     a    b
0    K    R
1    a    a
2    M    J
3  NaN    u
4    A  NaN
>>> df['a'] = df['a'].str.lower()
>>> df
     a    b
0    k    R
1    a    a
2    m    J
3  NaN    u
4    a  NaN

convert the column names of a dataframe to lower case

df.rename(columns=lambda x: x.lower(), inplace=True)

Use case: While merging data from two data frames using DataFrame.merge(), I ended up with two columns with same name but differing in case (ex: foo from df1, FOO from df2). This caused problems when I tried to upload data into a hadoop cluster since hive is not case sensitive. As a work around, I converted the column names in df2 to lower case and then merged using pd.merge(df1, df2, ..., suffixes = ('_df1', '_df2')). The resulting data frame will now have foo_df1, foo_df2 columns.

count categories

>>> df2
     Department  Lottery  Literacy  Wealth Region
1         Aisne       38        51      22      N
2        Allier       66        13      61      C
3  Basses-Alpes       80        46      76      E
4  Hautes-Alpes       79        69      83      E
5       Ardeche       70        27      84      S
6      Ardennes       31        67      33      N
7        Ariege       75        18      72      S
8          Aube       28        59      14      E
9          Aude       50        34      17      S

>>> df2['Region'].value_counts()
S    3
E    3
N    2
C    1

cumulative sum

>>> import pandas as pd
>>> a = [2, -3, 4]; b = ['a', 'b', 'c']; c = [7, 4, 1]
>>> df = pd.DataFrame({'a':a, 'b':b, 'c':c})
>>> df
   a  b  c
0  2  a  7
1 -3  b  4
2  4  c  1
>>> df['d'] = df['a'].cumsum()
>>> df['e'] = df['b'].cumsum()
>>> df['f'] = df['c'].cumsum()
>>> df
   a  b  c  d    e   f
0  2  a  7  2    a   7
1 -3  b  4 -1   ab  11
2  4  c  1  3  abc  12

add a sequence of numbers as a column to dataframe


In [49]: df
Out[49]: 
        y
0  169.91
1  265.32
2  158.53
3  160.87
4  167.45
5  158.23
6  165.52
7  155.62

In [50]: df['rownum'] = range(1, df.shape[0]+1)

In [51]: df
Out[51]: 
        y  rownum
0  169.91       1
1  265.32       2
2  158.53       3
3  160.87       4
4  167.45       5
5  158.23       6
6  165.52       7
7  155.62       8

In [52]: df.drop('rownum', axis=1, inplace=True)

In [53]: df
Out[53]: 
        y
0  169.91
1  265.32
2  158.53
3  160.87
4  167.45
5  158.23
6  165.52
7  155.62

misc task

Task: Check if the values in a column of a dataframe exist among the values in a column of another dataframe. Add the result as a new column to the first data frame.

In [47]: a = pd.DataFrame({'pkg': ['kdegraphics-strigi-analyzer', 'kdesdk-strigi-plugins', 'libclucene-core1', 'libstreamanalyzer0', 'libzmq3']}); b = pd.DataFrame({'package': ['libzmq3', 'python3.4', 'kdesdk-strigi-plugins']})

In [48]: a
Out[48]: 
                           pkg
0  kdegraphics-strigi-analyzer
1        kdesdk-strigi-plugins
2             libclucene-core1
3           libstreamanalyzer0
4                      libzmq3

In [49]: b
Out[49]: 
                 package
0                libzmq3
1              python3.4
2  kdesdk-strigi-plugins

In [50]: a['exists'] = a['pkg'].isin(b['package'])

In [51]: a
Out[51]: 
                           pkg exists
0  kdegraphics-strigi-analyzer  False
1        kdesdk-strigi-plugins   True
2             libclucene-core1  False
3           libstreamanalyzer0  False
4                      libzmq3   True

unsorted

  • To print all column names in a data frame - df.columns.values
  • Number of missing values in a dataframe - df.isnull().sum()

experiment with get_dummies

>>> import pandas as pd
>>> df = pd.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'], 'C': [1, 2, 3]})
>>> df
   A  B  C
0  a  b  1
1  b  a  2
2  a  c  3

>>> pd.get_dummies(df)
   C  A_a  A_b  B_a  B_b  B_c
0  1  1.0  0.0  0.0  1.0  0.0
1  2  0.0  1.0  1.0  0.0  0.0
2  3  1.0  0.0  0.0  0.0  1.0
>>> pd.get_dummies(df, columns=['A'])
   B  C  A_a  A_b
0  b  1  1.0  0.0
1  a  2  0.0  1.0
2  c  3  1.0  0.0
>>> pd.get_dummies(df, columns=['B'])
   A  C  B_a  B_b  B_c
0  a  1  0.0  1.0  0.0
1  b  2  1.0  0.0  0.0
2  a  3  0.0  0.0  1.0
>>> pd.get_dummies(df, columns=['A', 'B'])
   C  A_a  A_b  B_a  B_b  B_c
0  1  1.0  0.0  0.0  1.0  0.0
1  2  0.0  1.0  1.0  0.0  0.0
2  3  1.0  0.0  0.0  0.0  1.0
>>> pd.get_dummies(df, columns=['B', 'A'])
   C  B_a  B_b  B_c  A_a  A_b
0  1  0.0  1.0  0.0  1.0  0.0
1  2  1.0  0.0  0.0  0.0  1.0
2  3  0.0  0.0  1.0  1.0  0.0

Ref:- http://pandas.pydata.org/pandas-docs/version/0.18.1/generated/pandas.get_dummies.html

using at with multi index

df.at[(key1, key2), 'col1']
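
A small made-up illustration:

import pandas as pd

# dataframe with a two-level row index
df = pd.DataFrame({'col1': [10, 20]},
                  index=pd.MultiIndex.from_tuples([('a', 1), ('a', 2)]))
print(df.at[('a', 2), 'col1'])   # 20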

hierarchical groupby

Consider the dataframe

d1 = pd.DataFrame( {'StudentID':    ["x1", "x10", "x2","x3", "x4", "x5", "x6",   "x7", "x8", "x9"],
 'StudentGender' : ['F', 'M', 'F', 'M', 'F', 'M', 'F', 'M', 'M', 'M'],
 'ExamenYear': ['2007','2007','2007','2008','2008','2008','2008','2009','2009','2009'],
 'Exam': ['algebra', 'stats', 'bio', 'algebra', 'algebra', 'stats', 'stats', 'algebra', 'bio', 'bio'],
 'Participated': ['no','yes','yes','yes','no','yes','yes','yes','yes','yes'],
 'Passed': ['no','yes','yes','yes','no','yes','yes','yes','no','yes']},
 columns = ['StudentID', 'StudentGender', 'ExamenYear', 'Exam', 'Participated', 'Passed'])

print(d1)
  StudentID StudentGender ExamenYear     Exam Participated Passed
0        x1             F       2007  algebra           no     no
1       x10             M       2007    stats          yes    yes
2        x2             F       2007      bio          yes    yes
3        x3             M       2008  algebra          yes    yes
4        x4             F       2008  algebra           no     no
5        x5             M       2008    stats          yes    yes
6        x6             F       2008    stats          yes    yes
7        x7             M       2009  algebra          yes    yes
8        x8             M       2009      bio          yes     no
9        x9             M       2009      bio          yes    yes

and the function

def ZahlOccurence_0(x):
    return pd.Series({'All': len(x['StudentID']),
                      'Part': sum(x['Participated'] == 'yes'),
                      'Pass': sum(x['Passed'] == 'yes')})

We can do groupby at multiple levels and add the results

t1 = d1.groupby(['ExamenYear', 'Exam']).apply(ZahlOccurence_0)
t2 = d1.groupby('ExamenYear').apply(ZahlOccurence_0)
print(t1)
print(t2)
t3 = pd.concat([t1.reset_index(), t2.reset_index()], ignore_index=True)
print(t3)
t4 = t3.set_index(['ExamenYear', 'Exam'])
print(t4)
                    All  Part  Pass
ExamenYear Exam                    
2007       algebra    1     0     0
           bio        1     1     1
           stats      1     1     1
2008       algebra    2     1     1
           stats      2     2     2
2009       algebra    1     1     1
           bio        2     2     1
            All  Part  Pass
ExamenYear                 
2007          3     2     2
2008          4     3     3
2009          3     3     2
   All     Exam ExamenYear  Part  Pass
0    1  algebra       2007     0     0
1    1      bio       2007     1     1
2    1    stats       2007     1     1
3    2  algebra       2008     1     1
4    2    stats       2008     2     2
5    1  algebra       2009     1     1
6    2      bio       2009     2     1
7    3      NaN       2007     2     2
8    4      NaN       2008     3     3
9    3      NaN       2009     3     2
                    All  Part  Pass
ExamenYear Exam                    
2007       algebra    1     0     0
           bio        1     1     1
           stats      1     1     1
2008       algebra    2     1     1
           stats      2     2     2
2009       algebra    1     1     1
           bio        2     2     1
2007       NaN        3     2     2
2008       NaN        4     3     3
2009       NaN        3     3     2

When aggregating over all Exams for a given year, we can show a meaningful text instead of NaN.

t1 = d1.groupby(['ExamenYear', 'Exam']).apply(ZahlOccurence_0)
t2 = d1.groupby('ExamenYear').apply(ZahlOccurence_0).assign(Exam='All').reset_index().set_index(['ExamenYear', 'Exam'])
print(t1)
print(t2)
t3 = pd.concat([t1.reset_index(), t2.reset_index()], ignore_index=True)
print(t3)
t4 = t3.set_index(['ExamenYear', 'Exam'])
print(t4)
                    All  Part  Pass
ExamenYear Exam                    
2007       algebra    1     0     0
           bio        1     1     1
           stats      1     1     1
2008       algebra    2     1     1
           stats      2     2     2
2009       algebra    1     1     1
           bio        2     2     1
                 All  Part  Pass
ExamenYear Exam                 
2007       All     3     2     2
2008       All     4     3     3
2009       All     3     3     2
  ExamenYear     Exam  All  Part  Pass
0       2007  algebra    1     0     0
1       2007      bio    1     1     1
2       2007    stats    1     1     1
3       2008  algebra    2     1     1
4       2008    stats    2     2     2
5       2009  algebra    1     1     1
6       2009      bio    2     2     1
7       2007      All    3     2     2
8       2008      All    4     3     3
9       2009      All    3     3     2
                    All  Part  Pass
ExamenYear Exam                    
2007       algebra    1     0     0
           bio        1     1     1
           stats      1     1     1
2008       algebra    2     1     1
           stats      2     2     2
2009       algebra    1     1     1
           bio        2     2     1
2007       All        3     2     2
2008       All        4     3     3
2009       All        3     3     2

To make the report hierarchical, we can assemble it by adding "All" Exam rows in between instead of at the end.


t1 = d1.groupby(['ExamenYear', 'Exam']).apply(ZahlOccurence_0)
t2 = d1.groupby('ExamenYear').apply(ZahlOccurence_0).assign(Exam='All').reset_index().set_index(['ExamenYear', 'Exam'])
print(t1)
print(t2)
t1_group = t1.groupby(level=0)
t2_group = t2.groupby(level=0)
a = []
for (i, j) in t1_group:
    a.append(t1_group.get_group(i).reset_index())
    a.append(t2_group.get_group(i).reset_index())
t3 = pd.concat(a, ignore_index=True).set_index(['ExamenYear', 'Exam'])
print(t3)
                    All  Part  Pass
ExamenYear Exam                    
2007       algebra    1     0     0
           bio        1     1     1
           stats      1     1     1
2008       algebra    2     1     1
           stats      2     2     2
2009       algebra    1     1     1
           bio        2     2     1
                 All  Part  Pass
ExamenYear Exam                 
2007       All     3     2     2
2008       All     4     3     3
2009       All     3     3     2
                    All  Part  Pass
ExamenYear Exam                    
2007       algebra    1     0     0
           bio        1     1     1
           stats      1     1     1
           All        3     2     2
2008       algebra    2     1     1
           stats      2     2     2
           All        4     3     3
2009       algebra    1     1     1
           bio        2     2     1
           All        3     3     2

Ref:- https://stackoverflow.com/questions/15641449/concatenate-dataframes-with-different-levels-of-index-in-pandas

tags | join dataframes with different levels of indices, concat dataframes with different index levels, append data at the end of each group, concat dataframe at each group level, multiindex iterate on groups, method chaining assign variable name

call function on each group

def apply_per_group(df):
    grouped = df.groupby('column_foo')
    frames = []
    for group_id, df_group in grouped:
        # func_bar is a placeholder for the per-group operation
        new_df_group = df_group.func_bar()
        frames.append(new_df_group)
    if not frames:
        new_df = pd.DataFrame(None)
    else:
        new_df = pd.concat(frames)
    return new_df

tags | groupby call function

Ref:- http://pandas.pydata.org/pandas-docs/stable/groupby.html

number of groups in a pandas groupby object

groups = df.groupby('foo')
ngroups = len(groups)

Equivalently, use the ngroups attribute: df.groupby('foo').ngroups

preserve formatting of columns

set dtype to object to preserve the formatting of the columns. This is useful if we want to dump data after adding or removing certain columns.

df = pd.read_csv(fname, dtype=object)

deprecated

  • DataFrame.sort is deprecated. Use sort_values instead. A typical warning:

myscript.py:57: FutureWarning: sort(columns=....) is deprecated, use sort_values(by=.....)
  na_position='last')

Sum of values in a column when another column is 1

Consider the dataframe

>>> df = pd.DataFrame({'a':[1,1,2,1,2], 'b':[5,7,3,3,5]})
>>> df
   a  b
0  1  5
1  1  7
2  2  3
3  1  3
4  2  5

To get the sum of values of b when column a is 1

>>> df.loc[df['a'] == 1, 'b'].sum()
15

To get the sum of values of b when column a is 2

>>> df.loc[df['a'] == 2, 'b'].sum()
8

pandas.read_csv()

  • From pandas 0.19.2 onwards, you can pass a URL directly to pandas.read_csv().

Sample code - https://github.com/KamarajuKusumanchi/market_data_processor/blob/master/google_finance.py

  • To assign column names, use names = [list of column names]
pd.read_csv(fname, index_col=None, header=None, names = ['foo', 'bar'])

Ref:- https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html

Check if two dataframes are equal

To check if two dataframes are equal and ignore the order of rows & columns during comparison

from pandas.testing import assert_frame_equal
assert_frame_equal(result, expected, check_like=True)

SettingWithCopyWarning

Consider the following code

dff = df[['foo', 'bar', 'baz']]
dff['qux'] = df['qux'] if 'qux' in df else None

It throws a SettingWithCopyWarning saying

A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  dff['qux'] = df['qux'] if 'qux' in df else None

To fix it

dff = df.filter(['foo', 'bar', 'baz'])
dff['qux'] = df['qux'] if 'qux' in df else None

read a sheet in excel file

df = pd.read_excel('file.xlsx', 'sheet_name', na_values=['-', 'N/A', 'NA'])

unmerge cells when writing a dataframe

df = pd.read_csv('C:/Users/raju/x/foo.csv')

# Set index on the first two columns
df.set_index(list(df)[:2], inplace=True)

# By default, to_excel will write MultiIndex and Hierarchical Rows
# as merged cells. Use merge_cells=False to disable this behaviour.
df.to_excel('C:/Users/raju/x/foo.xlsx', sheet_name='myfoo', startrow=1, startcol=1, merge_cells=False)

Ref:- https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.DataFrame.to_excel.html

build a dataframe with unique values from multiple columns

Select the columns of interest and call drop_duplicates() on it.

import pandas as pd
import numpy as np

df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'foo', 'foo'],
                   'B' : ['one', 'one', 'two', 'three',
                          'two', 'two', 'one', 'three'],
                   'C' : np.random.randn(8),
                   'D' : np.random.randn(8)})
print(df)
     A      B         C         D
0  foo    one  1.200722 -0.171384
1  bar    one -0.662782 -0.226719
2  foo    two  0.790387  1.091735
3  bar  three  0.615051 -2.474762
4  foo    two  0.128955 -0.519028
5  bar    two -0.990671 -1.010521
6  foo    one  0.299682 -0.220049
7  foo  three -0.140584 -1.405962
uniq = df[['A', 'B']].drop_duplicates()
print(uniq)
     A      B
0  foo    one
1  bar    one
2  foo    two
3  bar  three
5  bar    two
7  foo  three

treat zero divided by zero as zero

tags | handle 0 by 0

In [1]:
import pandas as pd
import numpy as np

In [6]:
df = pd.DataFrame({'s1': [1.1, 0.5, 0, 0, 4.2, np.nan, np.nan],
                   's2': [2.2, 0, 0.7, 0, np.nan, 5.6, np.nan]})
print(df)

Out [6]:
    s1   s2
0  1.1  2.2
1  0.5  0.0
2  0.0  0.7
3  0.0  0.0
4  4.2  NaN
5  NaN  5.6
6  NaN  NaN

In [39]:
s1 = df['s1']; s2 = df['s2']
s3 = (s2.fillna(0)/s1.fillna(0) - 1) * 100

mask_zero_by_zero = (s1.fillna(0) == 0) & (s2.fillna(0) == 0)
s4 = (s2.fillna(0)/s1.fillna(0) - 1) * 100
s4[mask_zero_by_zero] = 0.0

df2 = pd.concat((df, pd.DataFrame({'s3': s3, 'mask_zero_by_zero': mask_zero_by_zero, 's4': s4})), axis=1)
print(df2)

Out [39]:
    s1   s2 mask_zero_by_zero          s3          s4
0  1.1  2.2             False  100.000000  100.000000
1  0.5  0.0             False -100.000000 -100.000000
2  0.0  0.7             False         inf         inf
3  0.0  0.0              True         NaN    0.000000
4  4.2  NaN             False -100.000000 -100.000000
5  NaN  5.6             False         inf         inf
6  NaN  NaN              True         NaN    0.000000

External links

Frequent stuff

Common use cases involving DataFrames

For a complete list, see http://pandas.pydata.org/pandas-docs/stable/api.html#index

Use case: Get the number of rows and columns
  • rows = df.shape[0]
  • cols = df.shape[1]
  • (rows, cols) = df.shape
See also: DataFrame.shape

Use case: Select rows when columns contain (or do not contain) certain values
  • df[df['name'].isin(value_list)]
  • df[~df['name'].isin(value_list)]

Use case: Get N distinct values
  • df['name'].unique()[:N]
See also: Series.unique

Use case: Get all distinct values
  • df['name'].unique()
See also: Series.unique

Use case: Limit dataframe to N distinct values of a column
def limit_distinct(df, col, N):
    v = df[col].unique()[:N]
    return df[df[col].isin(v)]
df.pipe(limit_distinct, 'name', N)

Use case: Summary stats of a column
  • df['foo'].func() where func is something like mean, sum, std, median, min, max

Use case: Set a string value to missing
  • df['foo'].replace('bar', np.nan)

Use case: Select first 10 rows
  • df[:10]

API of frequently used dataframe functions

Name link
pandas.read_csv http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html
pandas.merge http://pandas.pydata.org/pandas-docs/stable/generated/pandas.merge.html
df.replace http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.replace.html
df.rename https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.rename.html