Code Snippets

Convert Google Contacts CSV to Mutt Aliases   python

import csv
import os
import pinyin

# open() does not understand the shell's "~" shorthand, so expand it.
csv_file = os.path.expanduser('~/google.csv')
alias_file = os.path.expanduser('~/alias.txt')


def zfc2string(zfc):
    """Transliterate a Chinese string to its pinyin spelling, spaces removed."""
    return pinyin.get(zfc, format='strip').replace(' ', '')


# Open the alias file once for the whole run instead of once per contact.
with open(csv_file) as f, open(alias_file, 'a+') as out:
    f_csv = csv.reader(f)
    headers = next(f_csv)  # skip the Google Contacts header row
    for row in f_csv:
        # Column layout of a Google Contacts export: 0 = full name,
        # 1 = given name, 3 = family name, 28 = primary e-mail.
        name = zfc2string(row[0]).lower()
        first_name = row[1]
        last_name = row[3]
        email = row[28]
        if not name or not first_name or not email:
            print('Skipped: [name]=' + name + '; [email]=' + email)
            continue  # incomplete record: report it and do NOT write an alias
        out.write('alias' + '\t')
        out.write(name + '\t')
        if last_name:
            out.write(' ' + last_name + '\t')
        out.write('<' + email + '>' + '\n')

Python解析Kindle读书笔记   python

import re
import codecs
EOR = "=========="  # record separator used by Kindle "My Clippings.txt"


def parse(filename):
    """Yield one dict per clipping record found in *filename*.

    Each record in a Kindle clippings file is: a "Title (Author)" line, a
    metadata line, a blank line, the clipping body, then the "=========="
    separator.  Yielded dicts have the keys: title, author, type, location,
    dow, date, time, content.
    """
    # "with" guarantees the file is closed when the generator finishes.
    with open(filename, 'r') as clip_file:
        record = list()
        for line in clip_file:
            if line.strip() == EOR:
                assert record[2] == '', (
                    "Blank line expected separating the header from the "
                    "body of the clipping: %s" % record[2])
                clip = dict()
                match = re.match(r'(.*?) \((.*)\)$', record[0])
                clip['title'], clip['author'] = match.groups()
                match = re.match(
                    r'- (\w+) Loc. ([^|]+)\| Added on (\w+), (\w+ \d+, \d+), (\d+:\d+ \w\w)',
                    record[1])
                clip['type'], clip['location'], clip['dow'], clip['date'], \
                    clip['time'] = match.groups()
                clip['content'] = "\n".join(record[3:])
                clip['title'] = clip['title'].strip()
                clip['author'] = clip['author'].strip()
                yield clip
                record = list()
            else:
                # Accumulate the current record's lines.  The original
                # appended inside the separator branch, which left every
                # record empty and crashed on record[2].
                record.append(line.strip())
if __name__ == '__main__':
    from sys import argv

    outfile = 'output.txt'
    # "with" guarantees the output file is flushed and closed even on an
    # error; the original left the handle open.
    with open(outfile, 'a') as f:
        for n, r in enumerate(parse(argv[1])):
            f.write(r['content'] + '\n\n')
            # The trailing newlines keep consecutive records from running
            # together (the original wrote nothing after the location, so
            # the next record's content started on the same line).
            f.write('--- ' +
                    r['author'] + ', ' +
                    r['title'] + ', ' +
                    'loc. ' + r['location'] + '\n\n')

Perl来解析Kindle Clippings   perl

#!/usr/bin/perl -w
# Parse a Kindle "My Clippings.txt" file into per-book highlight/note
# collections and write a formatted summary to an output file.
#
# NOTE(review): this snippet appears truncated by extraction -- several
# closing braces, the body of an "if" and the end of the main loop are
# missing, so it will not compile as-is.  The comments below describe the
# apparent intent only.
use warnings;
use strict;
my $MY_CLIPPINGS_FILE = "test.txt";
my $OUTPUT_FILE       = "output.txt";
# NOTE: the file is in UTF-8 format -- the first 3 bytes are
# a byte order mark: EF BB BF = 0xFEFF when decoded via UTF-8
open my $clips_fh, "<:encoding(utf8)", $MY_CLIPPINGS_FILE
        or die "Unable to open file $MY_CLIPPINGS_FILE: $!";
# since there might be UTF-8 encoded characters in the input, also output UTF-8
open my $output_fh, ">:utf8", $OUTPUT_FILE  # auto UTF-8 encoding on write
        or die "Unable to create output file $OUTPUT_FILE: $!";
binmode STDOUT, ":encoding(utf8)";
# %clips maps book title -> { location -> accumulated text }.
my %clips;
while(<$clips_fh>) {
        # First line of each entry is the book title.
        my $title = $_;
        $title =~ s/^\x{FEFF}//; # remove BOM
    $title =~ s/\r\n/\n/g;
        # Second line is the metadata ("- Highlight Loc. NNN | Added on ...").
        my $line = <$clips_fh>;
        my ($type, $location);
        if($line =~ /^-\s+(\w+)\s+Loc.\s+(\d+)/) {
                ($type, $location) = ($1, $2);
        } else {
        $type = "bookmark"
        if($type =~ /bookmark/i) {
        } else {
                # Accumulate the entry's body lines until the "=====" separator,
                # prefixing notes with "* " and highlights with "- ".
                while(1) {
                        $line = <$clips_fh>;
            $line =~ s/\r\n/\n/g;
                        next if $line =~ /^\s*$/;  # skip blank lines
                        last if $line =~ /^=======/;
                        if($type =~ /note/i) {
                                $clips{$title}{$location} .= '* ' .$line;
                        } else {
                                $clips{$title}{$location} .= '- ' .$line;
# Emit each book's clippings sorted by title and location.
foreach my $title (sort keys %clips) {
        print $output_fh "$title" . ('-' x (length($title) - 1)) . "\n";
        foreach my $location (sort keys %{$clips{$title}}) {
                print $output_fh $clips{$title}{$location};
        print $output_fh "\n";
# ------------------------------------------
# Consume and discard the rest of the current entry (up to the separator).
sub skip_entry {
        while(1) {
                my $line = <$clips_fh>;
                last if $line =~ /^=======/;

Elisp解析Kindle笔记   lisp

;;; repo:
(defun clip2org-get-next-book-as-list ()
  "Parse the next clipping entry after point and return it as a list.
The returned list is (TITLE IS-HIGHLIGHT PAGE LOC DATE CONTENT HEADER),
or nil when no further \"==========\" separator is found.
NOTE(review): this snippet appears truncated -- the
`buffer-substring-no-properties' call below is missing its arguments
and closing parens, so the function will not read as-is."
  (let (title is-highlight header loc date page start end content)
    (setq start (point))
    (if (not (re-search-forward "==========" nil t 1))
        ;; Return nil
      (setq end (point))
      (goto-char start)
      (setq title (buffer-substring-no-properties
      ;; remove BOM (Byte-Order Marks)
      (setq title (replace-regexp-in-string "\u200f\\|\ufeff" "" title))
      ;; Each field is located by searching forward within this entry only.
      (when (re-search-forward "Highlight on" end t 1)
        (setq is-highlight t))
      (when (re-search-forward "- \\(.*\\)|" end t 1)
        (setq header (match-string 1)))
      (when (re-search-forward "Page \\([0-9-]+\\)" end t 1)
        (setq page (match-string 1)))
      (when (re-search-forward "Location \\([0-9-]+\\)" end t 1)
        (setq loc (match-string 1)))
      (when (re-search-forward "Added on \\(.*\\)\n" end t 1)
        (setq date (match-string 1)))
      ;; From the end of date to ==========
      (if (re-search-forward
           "\n\\(.*?\\)\n==========" end t 1)
          (setq content (match-string 1)))
      ;; A title equal to the separator means the entry had no body.
      (when (equal title "==========")
        (error "Clip2org: failed in getting content or quoted text."))
      ;; Return list
      (list title is-highlight page loc date content header))))
(defun clip2org-convert-to-org (clist)
  "Process clip2org-alist and generate the output."
  ;; CLIST is presumably an alist of (title . clipping-records), where each
  ;; record looks like (is-highlight page loc date content header) -- the
  ;; cdr of what `clip2org-get-next-book-as-list' returns; confirm against
  ;; the caller.
  ;; Process headers of each book
  (while (caar clist)
    ;; Book title becomes a top-level org heading.
    (princ (format "\n* %s\n" (caar clist)))
    (let ((note-list (cdar clist)))
      ;; Process each clipping
      (while (car note-list)
        (let* ((item (car note-list))
               (is-highlight (nth 0 item))
               (page (nth 1 item))
               (loc (nth 2 item))
               (date (nth 3 item))
               (content (nth 4 item)))
          ;; Only highlights are emitted as list items; other entry types
          ;; are silently skipped here.
          (when is-highlight
            (princ (format "- %s\n" content))))
        (setq note-list (cdr note-list))))
    ;; Increment to the next book
    (setq clist (cdr clist))))
(defun clip2org-append-to-alist-key (key value alist)
  "Append a value to the key part of an alist. This function is
used to create associated lists. If Key is not found, add new key
to the list"
  ;; NOTE(review): the parentheses in this defun are unbalanced and the
  ;; `if' below appears to have lost its else-branch structure -- the
  ;; snippet looks truncated by extraction.
  (let ((templ) (results) (found))
    (while alist
      ;; check if key is already in list
      (if (equal (caar alist) key)
            (setq found t)
            (setq templ (list (nconc (car alist) (list value) templ)))
            ;; increment while loop
            (setq alist (cdr alist))
            ;; add/create to a new list
            (setq results (append results templ)))
          (setq results (append (list (car alist)) results))
          (setq alist (cdr alist)))))
    ;; add the new key/value to old list
    (if (not (eq found t))
        (setq results (append (list (list key value)) results)))
  ;; NOTE(review): the lines below belong to a different defun (likely the
  ;; clip2org entry point that walks `clipfile' and builds the alist); its
  ;; `defun' header is missing from this view.
  (find-file clipfile)
  (goto-char (point-min))
  (let (clist (booklist (clip2org-get-next-book-as-list)))
    (while booklist
      (setq clist (clip2org-append-to-alist-key
                   (car booklist) (cdr booklist) clist))
      (setq booklist (clip2org-get-next-book-as-list)))
    (clip2org-convert-to-org clist))

GPAC源码编译的brew公式   ruby

可以参考 这里 的教程,注意除了克隆 GPAC项目本身 外,还需要一个 gpac_extra_libs ,它的源码包可以从 这里 下载;或者可以直接 这里 下载已经编译好的版本,不管是自己编译还是直接用这些文件,总之最后需要把生成的文件都拷贝到 gpac/extra_lib 目录下。最后按照下面的Formula用brew安装即可: brew install ./gpac.rb

# Homebrew formula that builds GPAC from source.
# NOTE(review): this formula appears truncated by extraction -- `homepage`,
# `url` and `head` have lost their strings, the `args` array below is never
# closed, and the `end`s for `install`, `test` and the class are missing.
class Gpac < Formula
  desc "Multimedia framework for research and academic purposes"
  homepage ""
  url ""
  sha256 "e5aa52e805a9ac56fdd2ea1f414b2fd49af5f3977b286f8737168e849587a131"
  head ""

  depends_on "faad2"
  depends_on "sdl"
  depends_on "pkg-config" => :build
  depends_on :x11 => :optional
  depends_on "a52dec" => :optional
  depends_on "jpeg" => :optional
  depends_on "libogg" => :optional
  depends_on "libvorbis" => :optional
  depends_on "mad" => :optional
  depends_on "theora" => :optional
  depends_on "ffmpeg" => :optional

  # Standard autotools-style build: configure, make, make install.
  def install
    # NOTE(review): the rest of the args list (and its closing bracket)
    # is missing here.
    args = ["--prefix=#{prefix}",

    system "./configure", *args
    system "make"
    system "make", "install"

  # Smoke test: mux a fixture MP3 into an MP4 with MP4Box.
  test do
    system "#{bin}/MP4Box", "-add", test_fixtures("test.mp3"), "#{testpath}/out.mp4"
    File.exist? "#{testpath}/out.mp4"

Python moving average   python

import numpy
def smooth(x, window_len=11, window='hanning'):
    """Smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the
    signal.  The signal is prepared by introducing reflected copies of the
    signal (with the window size) in both ends so that transient parts are
    minimized in the beginning and end part of the output signal.

    Parameters
    ----------
    x : 1-D numpy array
        the input signal
    window_len : int
        the dimension of the smoothing window; should be an odd integer
    window : str
        the type of window from 'flat', 'hanning', 'hamming', 'bartlett',
        'blackman'; a flat window will produce a moving average smoothing.

    Returns
    -------
    numpy array
        the smoothed signal.

    See also
    --------
    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
    numpy.convolve

    TODO: the window parameter could be the window itself if an array
    instead of a string.
    NOTE: length(output) != length(input); to correct this return
    y[(window_len//2-1):-(window_len//2)] instead of just y.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")

    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")

    if window_len < 3:
        return x

    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', "
                         "'bartlett', 'blackman'")

    # Pad both ends with reflected copies of the signal so the convolution
    # has full support near the boundaries.
    s = numpy.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # Look the window function up on the numpy module by name.
        w = getattr(numpy, window)(window_len)
    # Normalize the window so the output preserves the signal's scale.
    y = numpy.convolve(w / w.sum(), s, mode='valid')
    return y

Python插值   python

import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate

# Sample an exponential decay at the integers and mark the samples.
x = np.arange(0, 10)
y = np.exp(-x / 3.0)
plt.plot(x, y, 'ro')

# Evaluate each interpolant on a finer grid inside the sampled range.
xnew = np.arange(0, 9, 0.1)
# 'nearest', 'zero': step (staircase) interpolation
# 'slinear': linear interpolation
# 'quadratic': 2nd-order B-spline interpolation
# 'cubic': 3rd-order B-spline interpolation
for kind in ['nearest', 'zero', 'slinear', 'quadratic', 'cubic']:
    f = interpolate.interp1d(x, y, kind=kind)
    ynew = f(xnew)
    plt.plot(xnew, ynew, label=str(kind))
# 'higher right' is not a valid matplotlib legend location; 'upper right' is.
plt.legend(loc='upper right')
plt.show()

检测突变值   python

"""Cumulative sum algorithm (CUSUM) to detect abrupt changes in data."""

from __future__ import division, print_function
import numpy as np

__author__ = 'Marcos Duarte,'
__version__ = "1.0.4"
__license__ = "MIT"

def detect_cusum(x, threshold=1, drift=0, ending=False, show=True, ax=None):
    """Cumulative sum algorithm (CUSUM) to detect abrupt changes in data.

    Parameters
    ----------
    x : 1D array_like
        data.
    threshold : positive number, optional (default = 1)
        amplitude threshold for the change in the data.
    drift : positive number, optional (default = 0)
        drift term that prevents any change in the absence of change.
    ending : bool, optional (default = False)
        True (1) to estimate when the change ends; False (0) otherwise.
    show : bool, optional (default = True)
        True (1) plots data in matplotlib figure, False (0) don't plot.
    ax : a matplotlib.axes.Axes instance, optional (default = None).

    Returns
    -------
    ta : 1D array_like [indi, indf], int
        alarm time (index of when the change was detected).
    tai : 1D array_like, int
        index of when the change started.
    taf : 1D array_like, int
        index of when the change ended (if `ending` is True).
    amp : 1D array_like, float
        amplitude of changes (if `ending` is True).

    Notes
    -----
    Tuning of the CUSUM algorithm according to Gustafsson (2000) [1]_:
    Start with a very large `threshold`.
    Choose `drift` to one half of the expected change, or adjust `drift` such
    that `g` = 0 more than 50% of the time.
    Then set the `threshold` so the required number of false alarms (this can
    be done automatically) or delay for detection is obtained.
    If faster detection is sought, try to decrease `drift`.
    If fewer false alarms are wanted, try to increase `drift`.
    If there is a subset of the change times that does not make sense,
    try to increase `drift`.

    Note that by default repeated sequential changes, i.e., changes that have
    the same beginning (`tai`) are not deleted because the changes were
    detected by the alarm (`ta`) at different instants. This is how the
    classical CUSUM algorithm operates.

    If you want to delete the repeated sequential changes and keep only the
    beginning of the first sequential change, set the parameter `ending` to
    True. In this case, the index of the ending of the change (`taf`) and the
    amplitude of the change (or of the total amplitude for a repeated
    sequential change) are calculated and only the first change of the repeated
    sequential changes is kept. In this case, it is likely that `ta`, `tai`,
    and `taf` will have less values than when `ending` was set to False.

    See this IPython Notebook [2]_.

    References
    ----------
    .. [1] Gustafsson (2000) Adaptive Filtering and Change Detection.
    .. [2] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectCUSUM.ipynb

    Examples
    --------
    >>> from detect_cusum import detect_cusum
    >>> x = np.random.randn(300)/5
    >>> x[100:200] += np.arange(0, 4, 4/100)
    >>> ta, tai, taf, amp = detect_cusum(x, 2, .02, True, True)

    >>> x = np.random.randn(300)
    >>> x[100:200] += 6
    >>> detect_cusum(x, 4, 1.5, True, True)

    >>> x = 2*np.sin(2*np.pi*np.arange(0, 3, .01))
    >>> ta, tai, taf, amp = detect_cusum(x, 1, .05, True, True)
    """
    x = np.atleast_1d(x).astype('float64')
    gp, gn = np.zeros(x.size), np.zeros(x.size)
    ta, tai, taf = np.array([[], [], []], dtype=int)
    tap, tan = 0, 0
    amp = np.array([])
    # Find changes (online form)
    for i in range(1, x.size):
        s = x[i] - x[i-1]
        gp[i] = gp[i-1] + s - drift  # cumulative sum for + change
        gn[i] = gn[i-1] - s - drift  # cumulative sum for - change
        if gp[i] < 0:
            gp[i], tap = 0, i
        if gn[i] < 0:
            gn[i], tan = 0, i
        if gp[i] > threshold or gn[i] > threshold:  # change detected!
            ta = np.append(ta, i)    # alarm index
            tai = np.append(tai, tap if gp[i] > threshold else tan)  # start
            gp[i], gn[i] = 0, 0      # reset alarm

    # Estimation of when the change ends (offline form)
    if tai.size and ending:
        # Run the algorithm backwards: the start of a reversed change is
        # the end of the forward one.
        _, tai2, _, _ = detect_cusum(x[::-1], threshold, drift, show=False)
        taf = x.size - tai2[::-1] - 1
        # Eliminate repeated changes, changes that have the same beginning
        tai, ind = np.unique(tai, return_index=True)
        ta = ta[ind]
        # taf = np.unique(taf, return_index=False)  # correct later
        if tai.size != taf.size:
            if tai.size < taf.size:
                taf = taf[[np.argmax(taf >= i) for i in ta]]
            else:
                # The "else:" on the line above was lost in the original
                # snippet, which made both branches run unconditionally.
                ind = [np.argmax(i >= ta[::-1])-1 for i in taf]
                ta = ta[ind]
                tai = tai[ind]
        # Delete intercalated changes (the ending of the change is after
        # the beginning of the next change)
        ind = taf[:-1] - tai[1:] > 0
        if ind.any():
            ta = ta[~np.append(False, ind)]
            tai = tai[~np.append(False, ind)]
            taf = taf[~np.append(ind, False)]
        # Amplitude of changes
        amp = x[taf] - x[tai]

    if show:
        _plot(x, threshold, drift, ending, ax, ta, tai, taf, gp, gn)

    return ta, tai, taf, amp

def _plot(x, threshold, drift, ending, ax, ta, tai, taf, gp, gn):
    """Plot results of the detect_cusum function, see its help."""
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        print('matplotlib is not available.')
    else:
        if ax is None:
            _, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 6))

        t = range(x.size)
        # Top axes: the data with start/end/alarm markers.
        ax1.plot(t, x, 'b-', lw=2)
        if len(ta):
            ax1.plot(tai, x[tai], '>', mfc='g', mec='g', ms=10,
                     label='Start')
            if ending:
                ax1.plot(taf, x[taf], '<', mfc='g', mec='g', ms=10,
                         label='Ending')
            ax1.plot(ta, x[ta], 'o', mfc='r', mec='r', mew=1, ms=5,
                     label='Alarm')
            ax1.legend(loc='best', framealpha=.5, numpoints=1)
        ax1.set_xlim(-.01*x.size, x.size*1.01-1)
        ax1.set_xlabel('Data #', fontsize=14)
        ax1.set_ylabel('Amplitude', fontsize=14)
        ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
        yrange = ymax - ymin if ymax > ymin else 1
        ax1.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
        ax1.set_title('Time series and detected changes ' +
                      '(threshold= %.3g, drift= %.3g): N changes = %d'
                      % (threshold, drift, len(tai)))
        # Bottom axes: the positive/negative cumulative sums.
        ax2.plot(t, gp, 'y-', label='+')
        ax2.plot(t, gn, 'm-', label='-')
        ax2.set_xlim(-.01*x.size, x.size*1.01-1)
        ax2.set_xlabel('Data #', fontsize=14)
        ax2.set_ylim(-0.01*threshold, 1.1*threshold)
        ax2.axhline(threshold, color='r')
        # Fixed: the original set ax1's ylabel twice; this one is for ax2.
        ax2.set_ylabel('Amplitude', fontsize=14)
        ax2.set_title('Time series of the cumulative sums of ' +
                      'positive and negative changes')
        ax2.legend(loc='best', framealpha=.5, numpoints=1)
        plt.tight_layout()
        plt.show()

简单的Markdown页面样式   markdown

html {
    margin: 0 1em;
}
body {
    margin: 0 auto;
    word-wrap: break-word;
    max-width: 700px;
    font-size: 110%;
    line-height: 1.5;
}
/* image */
figure {
    text-align: center;
}
img {
    max-width: 85%;
}
/* code */
pre {
    overflow-x: auto;
    word-wrap: normal;
    background-color: #f9f9f9;
    padding: 0.8em;
}
/* blockquote */
blockquote {
    color: #777;
    font-style: italic;
    padding-left: 0.8em;
    border-left: 0.3em solid #ccc;
}
/* table */
table, th, td {
    margin: 0px auto;
    border: 1px solid grey;
}
th, td {
    padding: 0.3em;
}

Python来格式化输出json文件   python json

"""Command-line tool to validate and pretty-print JSON

Usage::

    $ echo '{"json":"obj"}' | python -m json.tool
    {
        "json": "obj"
    }
    $ echo '{ 1.2:3.4}' | python -m json.tool
    Expecting property name enclosed in double quotes: line 1 column 3 (char 2)

"""
import argparse
import collections
import json
import sys


def main():
    """Parse CLI options, load JSON from infile and pretty-print to outfile."""
    prog = 'python -m json.tool'
    description = ('A simple command line interface for json module '
                   'to validate and pretty-print JSON objects.')
    parser = argparse.ArgumentParser(prog=prog, description=description)
    parser.add_argument('infile', nargs='?', type=argparse.FileType(),
                        help='a JSON file to be validated or pretty-printed')
    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
                        help='write the output of infile to outfile')
    parser.add_argument('--sort-keys', action='store_true', default=False,
                        help='sort the output of dictionaries alphabetically by key')
    options = parser.parse_args()

    infile = options.infile or sys.stdin
    outfile = options.outfile or sys.stdout
    sort_keys = options.sort_keys
    with infile:
        try:
            if sort_keys:
                obj = json.load(infile)
            else:
                # Preserve the document's original key order when not sorting.
                obj = json.load(infile,
                                object_pairs_hook=collections.OrderedDict)
        except ValueError as e:
            raise SystemExit(e)
    with outfile:
        json.dump(obj, outfile, sort_keys=sort_keys, indent=4, ensure_ascii=False)


if __name__ == '__main__':
    main()


1107,Metroid: Zero Mission,,8.62793915424818,Main Game,"[""2015-Mar-12 (Australia [AU])"", ""2015-Mar-12 (Europe [EU])"", ""2016-Jan-14 (North America [NA])"", ""2014-Jun-19 (Japan [JP])"", ""2004-Mar-19 (Australia [AU])"", ""2004-Feb-09 (North America [NA])"", ""2004-Apr-08 (Europe [EU])"", ""2004-May-27 (Japan [JP])""]","[""Game Boy Advance"", ""Wii U""]","[""Adventure"", ""Platform"", ""Shooter""]","[""Action"", ""Science fiction""]","[""Nintendo Research & Development 1"", ""Nintendo""]",
7346,The Legend of Zelda: Breath of the Wild,,9.26063084572708,Main Game,"[""2017-Mar-03 (Worldwide [WW])"", ""2017-Mar-03 (Worldwide [WW])""]","[""Nintendo Switch"", ""Wii U""]","[""Adventure"", ""Role-playing (RPG)"", ""Sport""]","[""Action"", ""Fantasy"", ""Open world""]","[""Nintendo EPD"", ""Nintendo"", ""Nintendo of America"", ""Nintendo of Europe""]",
1041,The Legend of Zelda: Oracle of Ages,,8.03766831129613,Main Game,"[""2013-May-30 (Australia [AU])"", ""2013-May-30 (Europe [EU])"", ""2013-May-30 (North America [NA])"", ""2013-Feb-27 (Japan [JP])"", ""2001-Oct-05 (Europe [EU])"", ""2001-May-14 (North America [NA])"", ""2001-Feb-27 (Japan [JP])""]","[""Game Boy Color"", ""Nintendo 3DS""]","[""Adventure"", ""Role-playing (RPG)""]","[""Action""]","[""Capcom"", ""Nintendo""]",
(defun parse-csv-string-rows (data separator quote-char line-sep)
  "Parse a separated and quoted string DATA into a list of list of strings.
Uses SEPARATOR as the column seperator, QUOTE-CHAR as the
string quoting character, and LINE-SEP as the line separator.
NOTE(review): this snippet appears truncated by extraction -- the main
scanning loop's header and every `cl-ecase' branch label (presumably
:read-word / :in-string) are missing, so the function will not read
as-is.  Comments below describe the apparent intent only."
  (let ((items '())
        (lines '())
        (offset 0)
        (rawlines (if line-sep (split-string data line-sep) (list data)))
        (line "")
        (current-word "")
        (state :read-word))
    (catch 'return
        (setq line (pop rawlines))
         ;; End-of-line handling: either continue a quoted field across the
         ;; line break, finish the row, or return all parsed rows.
         (when (or (not line) (= offset (length line)))
           ;; all done
           (cl-ecase state
              (if rawlines; have more lines
                    (setq offset 0)
                    (setq current-word (concat current-word line-sep))
                    (setq line (pop rawlines)))
                (error "Unterminated string")))
              ;; new line!
              (push (nreverse (cons current-word items)) lines)
              (if rawlines
                    (setq current-word "")
                    (setq items '())
                    (setq offset 0)
                    (setq line (pop rawlines)))
                (throw 'return
                       (nreverse lines))))))
         ;; handle empty line
         (if (= 0 (length line))
             (cl-ecase state
                (setq offset 0)
                (setq current-word (concat current-word line-sep))
                (setq line (pop rawlines)))
                ;; new line!
                (push (nreverse (cons current-word items)) lines)
                (setq offset 0)
                (setq line (pop rawlines))))
         ;; Dispatch on the character at OFFSET: separator, quote, or data.
         (let ((current (aref line offset)))
            ((char-equal separator current)
             (cl-ecase state
                (setq current-word (concat current-word (char-to-string current))))
                (push current-word items)
                (setq current-word ""))))
            ((char-equal quote-char current)
             (cl-ecase state
                ;; A doubled quote inside a string is an escaped quote.
                (let ((offset+1 (1+ offset)))
                   ((and (/= offset+1 (length line))
                         (char-equal quote-char (aref line offset+1)))
                    (setq current-word (concat current-word (char-to-string quote-char)))
                    (cl-incf offset))
                   (t (setq state :read-word)))))
                (setq state :in-string))))
             (setq current-word (concat current-word (char-to-string current))))))
         (cl-incf offset)))))))

  ;; NOTE(review): the `defun' header of this function is missing from this
  ;; view -- these lines are the body of a function (taking a `csvfile'
  ;; argument) that renders the games CSV as an HTML table via
  ;; `parse-csv-string-rows'.  The first CSV line is dropped by `cdr'
  ;; (header row).
  (princ "<table>\n<thead><tr><th>Title</th><th>ID</th></tr></thead>\n")
  (insert-file-contents csvfile)
  (let ((csvlines (cdr (split-string (buffer-string) "\n" t))))
    (dolist (csvline csvlines)
      (let* ((csvcols (car (parse-csv-string-rows csvline ?\, ?\" nil)))
             (game-id (nth 0 csvcols))
             (game-name (nth 1 csvcols))
             (game-url (nth 2 csvcols)))
        (princ (format "<tr><td>%s</td><td><a href=\"%s\">%s</a></td></tr>\n" game-name game-url game-id)))))
  (princ "</tbody></table>"))

Import to Jumsoft Money from MoneyWiz

import pandas as pd

mw = pd.read_csv("moneywiz.csv")
jumsoft = pd.DataFrame(columns=["Date", "Payee", "Category", "Amount", "Balance", "Type", "Account", "Tags", "Notes", "Currency", "Currency Rate"])

def conv_date(date):
    """Convert a MoneyWiz "MM/DD/YYYY" date to Jumsoft's "M/D/YY" form.

    A single leading zero is stripped from the month and the day, and only
    the last two digits of the year are kept.
    """
    parts = date.split("/")
    month = parts[0][1:] if parts[0].startswith("0") else parts[0]
    day = parts[1][1:] if parts[1].startswith("0") else parts[1]
    return "%s/%s/%s" % (month, day, parts[2][2:])

# Convert each MoneyWiz transaction row into a Jumsoft Money row.
for i, row in mw.iterrows():
    mdict = {}
    # NOTE(review): the body of this "if" is missing (the next line is at
    # the same indent) -- the snippet looks truncated.  Presumably rows
    # with a non-null 'Name' (account header rows?) were skipped with a
    # `continue`; confirm against the original script.
    if not pd.isnull(row['Name']):
    isexpense = True if row['Amount'][0] == '-' else False
    mdict['Date'] = conv_date(row['Date'])
    mdict['Payee'] = row['Payee']
    if not pd.isnull(row['Transfers']):
        mdict['Category'] = "Transfers to:" + row['Transfers'] if isexpense else "Transfers from:" + row['Transfers']
        # NOTE(review): an "else:" appears to be missing before this line;
        # as written, the plain category always overwrites the transfer one.
        mdict['Category'] = row['Category']
    # Strip thousands separators so amounts parse as numbers.
    mdict['Amount'] = row['Amount'].replace(",", "")
    mdict['Balance'] = row['Balance'].replace(",", "")
    mdict['Type'] = "Expense" if isexpense else "Income"
    mdict['Account'] = row['Account']
    mdict['Tags'] = row['Tags']
    mdict['Notes'] = row['Description']
    mdict['Currency'] = row['Currency']
    mdict['Currency Rate'] = "1"
    # Tranfers
    # NOTE(review): DataFrame.append is removed in pandas >= 2.0; use
    # pd.concat when modernizing.
    jumsoft = jumsoft.append(mdict, ignore_index=True)
jumsoft.to_csv("/Users/jiaxi/Desktop/j.csv", index=False)
Edited by Isaac Gu on 2019-07-16 Tue 09:36