"""Add Reference.source

Revision ID: c92f30c03b62
Revises: 56ddeb75114e
Create Date: 2016-06-02 18:12:08.511811

"""
from __future__ import unicode_literals


# revision identifiers, used by Alembic.
revision = 'c92f30c03b62'
down_revision = u'56ddeb75114e'

from alembic import op
import sqlalchemy as sa
from sqlalchemy import sql


def upgrade():
    # We want to add a NOT NULL column without a default value. So we first
    # add the column without the constraint, then populate it, then add the
    # constraint.
    # Since a live deployment might be adding entries with a NULL value for
    # the new column, even *during* this migration, we postpone adding the
    # constraint to a later release where we are sure that no old version of
    # the codebase is running that might do such a thing.
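    #
    # That later migration (not this revision) would add the constraint
    # roughly along these lines (a sketch only, not part of this revision):
    #
    #     op.alter_column('references', 'source',
    #                     existing_type=sa.Enum('ncbi', 'ncbi_slice', 'lrg',
    #                                           'url', 'upload',
    #                                           name='reference_source'),
    #                     nullable=False)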

    connection = op.get_bind()

    # https://bitbucket.org/zzzeek/alembic/issue/89/opadd_column-and-opdrop_column-should
    context = op.get_context()
    if context.bind.dialect.name == 'postgresql':
        has_reference_source_type = context.bind.execute(
            "select exists (select 1 from pg_type "
            "where typname='reference_source')").scalar()
        if not has_reference_source_type:
            op.execute("CREATE TYPE reference_source AS ENUM "
                       "('ncbi', 'ncbi_slice', 'lrg', 'url', 'upload')")
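
    # (For reference: SQLAlchemy's Enum is a SchemaType, so something along
    # the lines of
    #     sa.Enum('ncbi', 'ncbi_slice', 'lrg', 'url', 'upload',
    #             name='reference_source').create(connection, checkfirst=True)
    # would likely also work. The explicit CREATE TYPE above follows the
    # original workaround for the Alembic issue linked above.)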

    # Columns `source` and `source_data` will make `geninfo_identifier`,
    # `slice_*`, and `download_url` obsolete.
    op.add_column('references', sa.Column(
        'source',
        sa.Enum('ncbi', 'ncbi_slice', 'lrg', 'url', 'upload',
                name='reference_source'),
        nullable=True))
    op.add_column('references', sa.Column(
        'source_data', sa.String(length=255), nullable=True))

    # Inline table definition we can use in this migration.
    references = sql.table(
        'references',
        sql.column('id', sa.Integer()),
        sql.column('accession', sa.String(20)),
        sql.column('source', sa.Enum('ncbi', 'ncbi_slice', 'lrg', 'url',
                                     'upload', name='reference_source')),
        sql.column('source_data', sa.String(255)),
        sql.column('geninfo_identifier', sa.String(13)),
        sql.column('slice_accession', sa.String(20)),
        sql.column('slice_start', sa.Integer()),
        sql.column('slice_stop', sa.Integer()),
        sql.column('slice_orientation', sa.Enum('forward', 'reverse',
                                                name='slice_orientation')),
        sql.column('download_url', sa.String(255)))

    # Get all rows.
    result = connection.execute(
        references.select().with_only_columns([
            references.c.id,
            references.c.accession,
            references.c.source,
            references.c.source_data,
            references.c.geninfo_identifier,
            references.c.slice_accession,
            references.c.slice_start,
            references.c.slice_stop,
            references.c.slice_orientation,
            references.c.download_url]))

    # Generate parameter values for the UPDATE query below.
    def update_params(r):
        data = None
        if r.source:
            # `source` is already set for this row (e.g. written by a newer
            # codebase version during the migration); keep it instead of
            # recomputing it from the legacy columns.
            source = r.source
            data = r.source_data
        elif r.accession.startswith('LRG_'):
            source = 'lrg'
        elif r.slice_accession:
            source = 'ncbi_slice'
            data = '{}:{}:{}:{}'.format(r.slice_accession, r.slice_start,
                                        r.slice_stop, r.slice_orientation)
        elif r.download_url:
            source = 'url'
            data = r.download_url
        elif r.geninfo_identifier:
            source = 'ncbi'
        else:
            source = 'upload'
        return {'r_id': r.id, 'r_source': source, 'r_source_data': data}
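
    # For illustration only (hypothetical values): a legacy row with
    # slice_accession='NC_000001.10', slice_start=1000, slice_stop=2000 and
    # slice_orientation='forward' maps to
    #     {'r_source': 'ncbi_slice',
    #      'r_source_data': 'NC_000001.10:1000:2000:forward'}
    # while a row with only a geninfo_identifier maps to source 'ncbi' with
    # source_data None.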

    # Process a few rows at a time, since otherwise they would all be read
    # into memory at once.
    while True:
        chunk = result.fetchmany(1000)
        if not chunk:
            break

        # Populate `source` and `source_data` based on existing column values.
        statement = references.update().where(
            references.c.id == sql.bindparam('r_id')
        ).values({'source': sql.bindparam('r_source'),
                  'source_data': sql.bindparam('r_source_data')})

        # Execute UPDATE query for fetched rows.
        connection.execute(statement, [update_params(r) for r in chunk])
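        # Note: passing a list of parameter dicts to `connection.execute()`
        # runs the statement in executemany style, binding `r_id`, `r_source`
        # and `r_source_data` once per row in this chunk.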

    op.create_index('reference_source_data', 'references',
                    ['source', 'source_data'], unique=False)


def downgrade():
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('reference_source_data', table_name='references')
    op.drop_column('references', 'source_data')
    op.drop_column('references', 'source')

    # https://bitbucket.org/zzzeek/alembic/issue/89/opadd_column-and-opdrop_column-should
    context = op.get_context()
    if context.bind.dialect.name == 'postgresql':
        op.execute('DROP TYPE IF EXISTS reference_source')
    ### end Alembic commands ###
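
# For reference, this revision would typically be applied or rolled back with
# the Alembic CLI (assuming a configured alembic.ini for the target database):
#
#     alembic upgrade c92f30c03b62    # or: alembic upgrade head
#     alembic downgrade 56ddeb75114e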